repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
costypetrisor/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
    ("SGD", SGDClassifier()),
    ("ASGD", SGDClassifier(average=True)),
    ("Perceptron", Perceptron()),
    ("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
                                                         C=1.0)),
    ("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
                                                          C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
    rng = np.random.RandomState(42)
    yy = []
    for i in heldout:
        yy_ = []
        for r in range(rounds):
            X_train, X_test, y_train, y_test = \
                train_test_split(X, y, test_size=i, random_state=rng)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            yy_.append(1 - np.mean(y_pred == y_test))
        yy.append(np.mean(yy_))
    plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
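# Note (added for clarity, not part of the original example): this script uses
# sklearn.cross_validation, which later releases of scikit-learn renamed; under
# scikit-learn >= 0.18 the equivalent import would be
#     from sklearn.model_selection import train_test_split
# with the rest of the example unchanged.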
| bsd-3-clause |
Castronova/EMIT | gui/controller/SimulationPlotCtrl.py | 1 | 14321 | import wx
import csv
from gui.views.SimulationsPlotView import SimulationsPlotView
from utilities import geometry
import matplotlib
from sprint import *
from utilities.gui import get_todays_date
class SimulationsPlotCtrl(SimulationsPlotView):
def __init__(self, parent, columns=None):
SimulationsPlotView.__init__(self, parent)
if columns:
self.table.set_columns(columns)
self.data = {} # Dictionary to hold the data respective to the row ID
self.geometries = {} # Holds the geometries respective to the row ID
self.start_date_object = wx.DateTime_Now() - 1 * wx.DateSpan_Day() # Default date is yesterday
self.end_date_object = wx.DateTime_Now() # Default date is today
self._row_start_date = None
self._row_end_date = None
self.start_date_picker.SetValue(self.start_date_object)
self.end_date_picker.SetValue(self.end_date_object)
# Adding room for the x axis labels to be visible
self.temporal_plot.add_padding_to_plot(bottom=0.15)
self.spatial_plot.add_padding_to_plot(bottom=0.15)
# Tool tips
self.export_button.SetToolTip(wx.ToolTip("Export data of highlighted region"))
# Pop up menu
self.popup_menu = wx.Menu()
export_menu = self.popup_menu.Append(1, "Export All")
# Bindings
self.refresh_button.Bind(wx.EVT_BUTTON, self.on_refresh)
self.export_button.Bind(wx.EVT_BUTTON, self.on_export)
self.table.Bind(wx.EVT_LIST_ITEM_SELECTED, self.on_row_selected)
self.start_date_picker.Bind(wx.EVT_DATE_CHANGED, self.on_start_date_change)
self.end_date_picker.Bind(wx.EVT_DATE_CHANGED, self.on_end_date_change)
self.spatial_plot.canvas.mpl_connect('pick_event', self.on_pick_spatial)
self.table.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.on_table_right_click)
self.Bind(wx.EVT_MENU, self.on_export_menu, export_menu)
def on_pick_spatial(self, event):
if isinstance(event.artist, matplotlib.collections.PathCollection):
self.spatial_plot.highlight_vertex(event)
elif isinstance(event.artist, matplotlib.collections.PolyCollection):
self.spatial_plot.highlight_polygon(event)
elif isinstance(event.artist, matplotlib.collections.LineCollection):
self.spatial_plot.highlight_line(event)
else:
print "More to come"
self.plot_highlighted_timeseries()
def get_data_start_end_date(self):
"""
Returns the start and end dates of the highlighted regions.
If nothing is highlighted, returns the start and end dates of the selected row.
Returns (-1, -1) if no row is selected.
:return:
"""
ID = self.get_selected_id()
if ID == -1:
return -1, -1 # No selected row return -1
row_data = self.get_highlighted_timeseries_data()
if not len(row_data):
row = self.table.get_selected_row() # Nothing is highlighted
return row[3], row[4]
for data in row_data:
date_object, value = data
d = []
for i in range(len(date_object)):
d.append((date_object[i], value[i]))
start_index, end_index = self.parse_data_to_range(d)
return d[start_index][0], d[end_index][0]
def get_geometries(self, ID):
"""
Converts the geometry string to objects
:param ID: Int
:return: a list of geometry objects
"""
geometries = []
for item in self.geometries[ID]:
geometries.append(geometry.fromWKT(item)[0])
return geometries
def get_highlighted_geometry(self):
"""
Return a dict mapping the index of each highlighted spatial feature to its object or coordinates
:return: type(dict)
"""
if len(self.spatial_plot.get_highlighted_polygons()):
return self.spatial_plot.get_highlighted_polygons()
if len(self.spatial_plot.get_highlighted_vertices()):
return self.spatial_plot.get_highlighted_vertices()
if len(self.spatial_plot.get_highlighted_lines()):
return self.spatial_plot.get_highlighted_lines()
return {}
def get_highlighted_timeseries_data(self):
"""
Gets the time series data for the highlighted geometries
Returns empty list if nothing is highlighted
:return: type(list)
"""
ID = self.get_selected_id()
if ID == -1:
return [] # No selected row
row_data = self.data[ID]
time_series_data = []
geometry = self.get_highlighted_geometry()
for key, value in geometry.iteritems():
time_series_data.append(row_data[key])
return time_series_data
def get_selected_id(self):
"""
:return: the ID type(Int) of the selected row or -1 if no row is selected
"""
row = self.table.get_selected_row()
if row:
return int(row[0])
return -1
def parse_data_to_range(self, data):
"""
Gets two indices at which the data can be sliced to keep only the samples
between the dates selected in the date pickers
:param data: type(list[(date1, value1), (date2, value2), ..., (dateN, valueN)])
:return:
"""
start_index = 0
end_index = -1
date = wx.DateTime()
for i in range(len(data)):
date.ParseFormat(str(data[i][0]), "%Y-%m-%d %H:%M:%S")
if self.start_date_object >= date:
start_index = i
else:
break
for i in range(len(data)-1, 0, -1):
date.ParseFormat(str(data[i][0]), "%Y-%m-%d %H:%M:%S")
if self.end_date_object <= date:
end_index = i
else:
break
return start_index, end_index
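# Usage note (illustrative comment, not from the original code): callers slice the
# parsed series as data[start_index:end_index], e.g. in plot_highlighted_timeseries()
# and on_export(), so only samples between the two date pickers are plotted or exported.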
def plot_highlighted_timeseries(self):
"""
Plots the time series for the highlighted geometries
:return:
"""
if self.get_selected_id() == -1:
return
time_series_data = self.get_highlighted_timeseries_data()
self.temporal_plot.clear_plot()
self.temporal_plot.rotate_x_axis_label()
units = self.table.get_selected_row()[2]
name = self.get_geometries(self.get_selected_id())[0]
name = name.GetGeometryName()
for data in time_series_data:
date_object, value = data
d = []
for i in range(len(date_object)):
d.append((date_object[i], value[i]))
start_index, end_index = self.parse_data_to_range(d)
self.temporal_plot.plot_dates(d[start_index : end_index], name, None, units)
def plot_spatial(self, ID, title):
"""
Plots the spatial of the selected row
:param ID: type(Int). Must match the selected row's ID
:return:
"""
self.spatial_plot.clear_plot()
geometries = self.get_geometries(ID)
self.spatial_plot.rotate_x_axis_label()
self.spatial_plot.plot_geometry(geometries, title)
self.spatial_plot.set_legend([title])
self.spatial_plot.redraw()
##########################
# EVENTS
##########################
def on_end_date_change(self, event):
"""
Prevents the end date from being set before the start date and
from being set to a day after today
:param event:
:return:
"""
if not self._row_end_date:
return # End date has not been set. Select a row to set date
if self.end_date_picker.GetValue() > self._row_end_date:
self.end_date_picker.SetValue(self._row_end_date)
elif self.start_date_picker.GetValue() > self.end_date_picker.GetValue(): # Prevent start date to overlap end
self.end_date_picker.SetValue(self.end_date_object)
elif self.end_date_picker.GetValue() > wx.DateTime_Now():
self.end_date_picker.SetValue(self.end_date_object) # Prevent end date to be set to after today
else:
self.end_date_object = self.end_date_picker.GetValue()
def on_export(self, event=None, export_all=False):
"""
Exports all the data pertaining to the selected row
Exports only one row: the top-most selected row
File format: CSV
:param event:
:return:
"""
ID = self.get_selected_id()
if ID == -1:
sPrint("Select a row before exporting", messageType=MessageType.INFO)
return # No selected row
if not len(self.get_highlighted_geometry()) and not export_all:
sPrint("Highlight a region to export", messageType=MessageType.INFO)
return # Nothing is highlighted
file_browser = wx.FileDialog(parent=self, message="Choose Path",
wildcard="CSV Files (*.csv)|*.csv", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if file_browser.ShowModal() == wx.ID_OK:
path = file_browser.GetPath()
header_line_break = '#' + 100 * '-'
disclaimer = "# NOTICE: this data set was exported by the EMIT model coupling framework. " \
"Use at your own risk"
file_handler = open(path, "w")
writer = csv.writer(file_handler, delimiter=',')
row = self.table.get_selected_row()
row_data = self.data[ID]
start_date, end_date = self.get_data_start_end_date()
writer.writerow([header_line_break])
writer.writerow([disclaimer])
writer.writerow(["#"])
writer.writerow(["# Date created: %s" % str("Day simulation was created")])
writer.writerow(["# Date exported: %s" % get_todays_date()])
writer.writerow(["# ID: %s" % ID])
writer.writerow(["# Variable: %s" % row[1]])
writer.writerow(["# Units: %s" % row[2]])
writer.writerow(["# Begin date: %s" % str(start_date)])
writer.writerow(["# End date: %s" % str(end_date)])
writer.writerow(["# Description: %s" % row[5]])
writer.writerow(["# Organization: %s" % row[6]])
writer.writerow(["#"])
writer.writerow([header_line_break])
if not export_all:
row_data = self.get_highlighted_timeseries_data()
columns = ["Dates", "Values", " "] * len(row_data) # Dynamically add columns with spacer
writer.writerow(columns)
rows = []
dates, values = zip(*row_data) # Unzip row_data, separate into dates and values
for i in range(len(dates)):
data = zip(*[dates[i], values[i]]) # Unzip the dates and values
if not export_all:
start_index, end_index = self.parse_data_to_range(data)
data = data[start_index: end_index]
for j in range(len(data)):
rows.append(data[j]) # A list of date and value. example [(date, value)]
# Keeps track of where to divide the rows list in order to write multiple columns and rows
count = len(rows) / len(row_data)
for i in range(count):
row = []
for j in range(len(row_data)):
data = rows[i + (count * j)]
row.append(data)
# Convert the list(row) into separate values to write to different columns
write_row = []
for item in row:
write_row.append(item[0])
write_row.append(item[1])
write_row.append("") # Add spacer column
writer.writerow(write_row)
file_handler.close()
def on_export_menu(self, event):
"""
Handles what happens when pop up export is clicked
:param event:
:return:
"""
self.on_export(export_all=True)
def on_row_selected(self, event):
"""
Sets the date pickers to match the selected row's start and end dates
The date variable needs to be reset in order to get the start dates to stick
:param event:
:return:
"""
self.spatial_plot.reset_highlighter()
self.temporal_plot.clear_plot()
date = wx.DateTime()
start_date_string = self.table.get_selected_row()[3]
if date.ParseFormat(start_date_string, "%Y-%m-%d") == -1:
raise Exception("start_date_string is not in the right format")
self._row_start_date = date
self.start_date_picker.SetValue(date)
self.start_date_object = date
date = wx.DateTime() # Need to reset the date
end_date_string = self.table.get_selected_row()[4]
if str(end_date_string) == "None":
self.end_date_picker.SetValue(wx.DateTime_Now())
elif date.ParseFormat(end_date_string, "%Y-%m-%d") == -1:
raise Exception("end_date_string is not in the right format")
else:
self.end_date_picker.SetValue(date)
self.end_date_object = date
self._row_end_date = self.end_date_picker.GetValue()
# Plot Spatial
self.plot_spatial(self.get_selected_id(), self.table.get_selected_row()[1])
def on_refresh(self, event):
self.plot_highlighted_timeseries()
def on_start_date_change(self, event):
"""
Prevents the start date from being set after the end date and
from being set before the row's start date
:param event:
:return:
"""
if not self._row_start_date:
return # Start date has not been set
if self.start_date_picker.GetValue() < self._row_start_date:
self.start_date_picker.SetValue(self._row_start_date)
elif self.start_date_picker.GetValue() > self.end_date_picker.GetValue():
self.start_date_picker.SetValue(self.start_date_object)
else:
self.start_date_object = self.start_date_picker.GetValue()
def on_table_right_click(self, event):
self.PopupMenu(self.popup_menu)
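# --- Hedged usage sketch (added; not part of the original file) ---
# Assuming a running wxPython app and that SimulationsPlotView builds the
# widgets referenced above, the panel could be hosted like this; the column
# names below are illustrative guesses, not taken from the project:
#
#   app = wx.App(False)
#   frame = wx.Frame(None, title="Simulations")
#   ctrl = SimulationsPlotCtrl(frame, columns=["ID", "Variable", "Units",
#                                              "Start", "End", "Description",
#                                              "Organization"])
#   frame.Show()
#   app.MainLoop()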
| gpl-2.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/font_manager.py | 7 | 47824 | """
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig` on Unix
variant platforms (Linux, OS X, Solaris). To enable it, set the
constant ``USE_FONTCONFIG`` in this file to ``True``. Fontconfig has
the advantage that it is the standard way to look up fonts on X11
platforms, so if a font is installed, it is much more likely to be
found.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import cPickle as pickle
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, warnings
try:
set
except NameError:
from sets import Set as set
from collections import Iterable
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_cachedir
from matplotlib.cbook import is_string_like
import matplotlib.cbook as cbook
from matplotlib.compat import subprocess
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'sans serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
"/usr/X11/lib/X11/fonts",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/",
# fonts installed via MacPorts
"/opt/local/share/fonts"
""
]
if not USE_FONTCONFIG and sys.platform != 'win32':
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
Return a list of file extensions that are synonyms for
the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
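# For example, get_fontext_synonyms('ttf') returns ('ttf', 'otf'), so OpenType
# files are matched whenever TrueType fonts are requested.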
def list_fonts(directory, extensions):
"""
Return a list of all fonts matching any of the extensions,
possibly upper-cased, found recursively under the directory.
"""
pattern = ';'.join(['*.%s;*.%s' % (ext, ext.upper())
for ext in extensions])
return cbook.listFiles(directory, pattern)
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
from matplotlib.externals.six.moves import winreg
except ImportError:
pass # Fall through to default
else:
try:
user = winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames is returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
from matplotlib.externals.six.moves import winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
return list_fonts(directory, fontext)
try:
for j in range(winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = winreg.EnumValue( local, j)
if not is_string_like(direc):
continue
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
except MemoryError:
continue
return list(six.iterkeys(items))
finally:
winreg.CloseKey(local)
return None
def OSXInstalledFonts(directories=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directories is None:
directories = OSXFontDirectories
fontext = get_fontext_synonyms(fontext)
files = []
for path in directories:
if fontext is None:
files.extend(cbook.listFiles(path, '*'))
else:
files.extend(list_fonts(path, fontext))
return files
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
applications, without needing to know where all of them reside.
"""
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
try:
warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
pipe = subprocess.Popen(['fc-list', '--format=%{file}\\n'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
# Calling fc-list did not work, so we'll just return nothing
return fontfiles
if pipe.returncode == 0:
# The line breaks between results are in ascii, but each entry
# is in in sys.filesystemencoding().
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
available. A list of TrueType fonts is returned by default, with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = X11FontDirectories
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, six.string_types):
fontpaths = [fontpaths]
for path in fontpaths:
files = list_fonts(path, fontexts)
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in six.iterkeys(fontfiles) if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, six.string_types):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError('weight not a valid integer')
return weight
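# For example, weight_as_number('bold') returns 700 and weight_as_number(400)
# returns 400; unrecognized strings fall back to 400 (normal).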
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def __repr__(self):
return "<Font '%s' (%s) %s %s %s %s>" % (
self.name, os.path.basename(self.fname), self.style, self.variant,
self.weight, self.stretch)
def ttfFontProperty(font):
"""
A function for populating the :class:`FontKey` by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.decode('macroman').lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.decode('macroman').lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in six.iterkeys(weight_dict):
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
A function for populating a :class:`FontKey` instance by
extracting information from the AFM font file.
*font* is a class:`AFM` instance.
"""
name = font.get_familyname()
fontname = font.get_fontname().lower()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
if fontname.find('narrow') >= 0 or fontname.find('condensed') >= 0 or \
fontname.find('cond') >= 0:
stretch = 'condensed'
elif fontname.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif fontname.find('wide') >= 0 or fontname.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g., 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'rb')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
try:
prop = afmFontProperty(fpath, font)
except KeyError:
continue
else:
try:
font = ft2font.FT2Font(fpath)
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try:
prop = ttfFontProperty(font)
except (KeyError, RuntimeError):
continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
- size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g., 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g., 'large', instead of absolute font sizes, e.g., 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = (tuple(self.get_family()),
self.get_slant(),
self.get_variant(),
self.get_weight(),
self.get_stretch(),
self.get_size_in_points(),
self.get_file())
return hash(l)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(findfont(self)).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = FontManager.get_default_size()
return default_size * font_scalings.get(self._size)
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', a real font name or a list of real
font names. Real font names are not supported when
`text.usetex` is `True`.
"""
if family is None:
family = rcParams['font.family']
if is_string_like(family):
family = [six.text_type(family)]
elif (not is_string_like(family) and isinstance(family, Iterable)):
family = [six.text_type(f) for f in family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style is None:
style = rcParams['font.style']
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant is None:
variant = rcParams['font.variant']
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is None:
weight = rcParams['font.weight']
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
weight = weight_dict[weight]
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is None:
stretch = rcParams['font.stretch']
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g., 12.
"""
if size is None:
size = rcParams['font.size']
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError(
"Size is invalid. Valid font size are " + ", ".join(
str(i) for i in font_scalings.keys()))
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in six.iteritems(self._parse_fontconfig_pattern(pattern)):
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
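# --- Hedged usage sketch (added; not part of the original module) ---
#   from matplotlib.font_manager import FontProperties, findfont
#   prop = FontProperties(family='serif', weight='bold', size='large')
#   path = findfont(prop)                        # best-matching font file on disk
#   prop2 = FontProperties('serif:weight=bold')  # a lone string is parsed as a
#                                                # fontconfig pattern (see __init__)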
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in six.itervalues(d):
for styled in six.itervalues(named):
for variantd in six.itervalues(styled):
for weightd in six.itervalues(variantd):
for stretchd in six.itervalues(weightd):
for fname in six.itervalues(stretchd):
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'wb') as fh:
pickle.dump(data, fh)
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
with open(filename, 'rb') as fh:
data = pickle.load(fh)
return data
class TempCache(object):
"""
A class to store temporary caches that are (a) not saved to disk
and (b) invalidated whenever certain font-related
rcParams---namely the family lookup lists---are changed or the
font cache is reloaded. This avoids the expensive linear search
through all fonts every time a font is looked up.
"""
# A list of rcparam names that, when changed, invalidated this
# cache.
invalidating_rcparams = (
'font.serif', 'font.sans-serif', 'font.cursive', 'font.fantasy',
'font.monospace')
def __init__(self):
self._lookup_cache = {}
self._last_rcParams = self.make_rcparams_key()
def make_rcparams_key(self):
return [id(fontManager)] + [
rcParams[param] for param in self.invalidating_rcparams]
def get(self, prop):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
return self._lookup_cache.get(prop)
def set(self, prop, value):
key = self.make_rcparams_key()
if key != self._last_rcParams:
self._lookup_cache = {}
self._last_rcParams = key
self._lookup_cache[prop] = value
class FontManager(object):
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
# Increment this version number whenever the font cache data
# format or behavior has changed and requires existing font
# cache files to be rebuilt.
__version__ = 101
def __init__(self, size=None, weight='normal'):
self._version = self.__version__
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm'),
os.path.join(rcParams['datapath'], 'fonts', 'pdfcorefonts')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
self.defaultFamily = {
'ttf': 'Bitstream Vera Sans',
'afm': 'Helvetica'}
self.defaultFont = {}
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont['ttf'] = fname
break
else:
# use anything
self.defaultFont['ttf'] = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
if len(self.afmfiles):
self.defaultFont['afm'] = self.afmfiles[0]
else:
self.defaultFont['afm'] = None
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
@staticmethod
def get_default_size():
"""
Return the default font size.
"""
return rcParams['font.size']
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
An exact match at the head of the list returns 0.0.
A match further down the list will return between 0 and 1.
No match will return 1.0.
"""
if not isinstance(families, (list, tuple)):
families = [families]
elif len(families) == 0:
return 1.0
family2 = family2.lower()
step = 1 / len(families)
for i, family1 in enumerate(families):
family1 = family1.lower()
if family1 in font_family_aliases:
if family1 in ('sans', 'sans serif'):
family1 = 'sans-serif'
options = rcParams['font.' + family1]
options = [x.lower() for x in options]
if family2 in options:
idx = options.index(family2)
return (i + (idx / len(options))) * step
elif family1 == family2:
# The score should be weighted by where in the
# list the font was found.
return i * step
return 1.0
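# For example, score_family(['Arial'], 'arial') returns 0.0 (exact match at the
# head of the list), while a family that appears nowhere in the list scores 1.0.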
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
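# For example, score_weight('normal', 'bold') is abs(400 - 700) / 1000.0 = 0.3.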
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
# Size value should have already been converted to a numeric point size
try:
sizeval1 = float(size1)
except ValueError:
sizeval1 = self.default_size * font_scalings[size1]  # font_scalings is a dict, not callable
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf', directory=None,
fallback_to_default=True, rebuild_if_missing=True):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
:meth:`findfont` performs a nearest neighbor search. Each
font is given a similarity score to the target font
properties. The first font with the highest score is
returned. If no matches below a certain threshold are found,
the default font (usually Vera Sans) is returned.
If `directory` is specified, will only return fonts from the
given directory (or subdirectory of that directory).
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
If `fallback_to_default` is True, will fallback to the default
font family (usually "Bitstream Vera Sans" or "Helvetica") if
the first lookup hard-fails.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
if not isinstance(prop, FontProperties):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
fontlist = self.afmlist
else:
fontlist = self.ttflist
if directory is None:
cached = _lookup_cache[fontext].get(prop)
if cached is not None:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
if (directory is not None and
os.path.commonprefix([font.fname, directory]) != directory):
continue
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
if fallback_to_default:
warnings.warn(
'findfont: Font family %s not found. Falling back to %s' %
(prop.get_family(), self.defaultFamily[fontext]))
default_prop = prop.copy()
default_prop.set_family(self.defaultFamily[fontext])
return self.findfont(default_prop, fontext, directory, False)
else:
# This is a hard fail -- we can't find anything reasonable,
# so just return the vera.ttf
warnings.warn(
'findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont[fontext]),
UserWarning)
result = self.defaultFont[fontext]
else:
verbose.report(
'findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, repr(best_font.fname), best_score))
result = best_font.fname
if not os.path.isfile(result):
if rebuild_if_missing:
verbose.report(
'findfont: Found a missing font file. Rebuilding cache.')
_rebuild()
return fontManager.findfont(
prop, fontext, directory, True, False)
else:
raise ValueError("No valid font could be found")
if directory is None:
_lookup_cache[fontext].set(prop, result)
return result
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
with open(filename, 'rb') as fd:
tag = fd.read(4)
result = (tag == b'OTTO')  # fd is opened in binary mode, so compare against bytes
_is_opentype_cff_font_cache[filename] = result
return result
return False
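# Illustrative call (hypothetical filename): is_opentype_cff_font('Foo.otf')
# reads the first four bytes of the file and reports True only when the tag is
# 'OTTO' (a CFF-flavoured OpenType font), caching the result per filename.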
fontManager = None
_fmcache = None
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
try:
pipe = subprocess.Popen(
['fc-match', '-s', '--format=%{file}\\n', pattern],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output = pipe.communicate()[0]
except (OSError, IOError):
return None
# The bulk of the output from fc-list is ascii, so we keep the
# result in bytes and parse it as bytes, until we extract the
# filename, which is in sys.filesystemencoding().
if pipe.returncode == 0:
for fname in output.split(b'\n'):
try:
fname = six.text_type(fname, sys.getfilesystemencoding())
except UnicodeDecodeError:
continue
if os.path.splitext(fname)[1][1:] in fontexts:
return fname
return None
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = None
if not 'TRAVIS' in os.environ:
cachedir = get_cachedir()
if cachedir is not None:
if six.PY3:
_fmcache = os.path.join(cachedir, 'fontList.py3k.cache')
else:
_fmcache = os.path.join(cachedir, 'fontList.cache')
fontManager = None
_lookup_cache = {
'ttf': TempCache(),
'afm': TempCache()
}
def _rebuild():
global fontManager
fontManager = FontManager()
if _fmcache:
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
if _fmcache:
try:
fontManager = pickle_load(_fmcache)
if (not hasattr(fontManager, '_version') or
fontManager._version != FontManager.__version__):
_rebuild()
else:
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
else:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
return font
| mit |
szinck/pandalert | pandalert/graphitelib.py | 1 | 1386 | import requests
import urllib
import pandas as pd
import time
import log
class GraphiteClient(object):
    def __init__(self, graphite_url, start_time=None, tz=None):
        self.graphite_url = graphite_url
        if start_time:
            self.time_shift = int(time.time() - start_time)
        else:
            self.time_shift = 0
        self.tz = tz

    def get(self, target, from_time=-60, until_time=0):
        result = pd.DataFrame(columns=('metric', 'time', 'target'))
        payload = {
            "format": "json",
            "target": target,
            "from": "%dsec" % (-self.time_shift + from_time),
        }
        if self.tz:
            payload["tz"] = self.tz
        until = -self.time_shift + until_time
        if until != 0:
            payload["until"] = "%dsec" % (until)
        query = self.graphite_url + "/render?" + urllib.urlencode(payload)
        log.log("Fetching Graphite Metric: %s" % (target))
        log.log("Fetching Graphite URL: %s" % (query))
        try:
            for m in requests.get(query).json():
                df = pd.DataFrame(m['datapoints'], columns=('metric', 'time'))
                df['target'] = m['target']
                result = result.append(df)
        except ValueError as e:
            log.log("got exception: %s. Returning empty metrics DataFrame." % (str(e)))
        return result
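# --- Hedged usage sketch (added; not part of the original file) ---
# The host and metric names below are placeholders:
#   client = GraphiteClient("http://graphite.example.com", tz="UTC")
#   df = client.get("servers.web01.loadavg.midterm", from_time=-3600)
#   # df is a pandas DataFrame with columns ('metric', 'time', 'target')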
| mit |
pozar87/apts | apts/equipment.py | 1 | 7989 | import logging
import cairo as ca
import igraph as ig
import matplotlib.pyplot as plt
import pandas as pd
from .constants import *
from .opticalequipment import *
from .optics import *
logger = logging.getLogger(__name__)
class Equipment:
"""
This class represents all possessed astronomical equipment. Allows to compute all possible
hardware configuration. It uses directed graph for internal processing.
"""
def __init__(self):
self.connection_garph = ig.Graph(directed=True)
# Register standard input and outputs
self.add_vertex(GraphConstants.SPACE_ID)
self.add_vertex(GraphConstants.EYE_ID)
self.add_vertex(GraphConstants.IMAGE_ID)
def _get_paths(self, output_id):
# Connect all outputs with inputs
self._connect()
# Find input and output nodes
space_node = self.connection_garph.vs.find(name=GraphConstants.SPACE_ID)
output_node = self.connection_garph.vs.find(name=output_id)
results = []
results_set = set()
for optical_path in Utils.find_all_paths(self.connection_garph, space_node.index, output_node.index):
result = [self.connection_garph.vs.find(
name=id)[NodeLabels.EQUIPMENT] for id in optical_path]
op = OpticalPath.from_path(
[item for item in result if item is not None])
if op.elements() not in results_set:
results_set.add(op.elements())
results.append(op)
return results
def get_zooms(self, node_id):
"""
Compute all possible zooms
:param node_id:
:return: sorted list of zooms
"""
result = [OpticsUtils.compute_zoom(path)
for path in self._get_paths(node_id)]
result.sort()
return result
def data(self):
columns = [EquipmentTableLabels.LABEL,
EquipmentTableLabels.TYPE,
EquipmentTableLabels.ZOOM,
EquipmentTableLabels.USEFUL_ZOOM,
EquipmentTableLabels.FOV,
EquipmentTableLabels.EXIT_PUPIL,
EquipmentTableLabels.DAWES_LIMIT,
EquipmentTableLabels.RANGE,
EquipmentTableLabels.BRIGHTNESS,
EquipmentTableLabels.ELEMENTS]
def append(result_data, paths):
for path in paths:
data = [[path.label(),
path.output.output_type(),
path.zoom().magnitude,
path.zoom() < path.telescope.max_useful_zoom(),
path.fov().magnitude,
path.output.focal_length / (path.telescope.focal_ratio() * path.effective_barlow()),
path.telescope.dawes_limit(),
path.telescope.limiting_magnitude(),
path.brightness().magnitude,
path.length()]]
result_data = result_data.append(pd.DataFrame(
data, columns=columns), ignore_index=True)
return result_data
result = pd.DataFrame(columns=columns)
result = append(result, self._get_paths(GraphConstants.EYE_ID))
result = append(result, self._get_paths(GraphConstants.IMAGE_ID))
# Add ID column as first
result['ID'] = result.index
return result[['ID'] + columns]
def plot_zoom(self, **args):
"""
Plot available magnification
"""
plot = self._plot(EquipmentTableLabels.ZOOM, 'Available zoom', 'Used equipment', 'Magnification', **args)
# Add marker for maximal useful zoom
max_zoom = self.max_zoom()
plot.axhline(max_zoom, color='orange', linestyle='--', alpha=0.7)
plot.annotate("Max useful zoom due to atmosphere", (-0.4, max_zoom + 2), alpha=0.7)
def max_zoom(self):
"""
Max useful zoom due to atmosphere
"""
return 350
def plot_fov(self, **args):
"""
Plot available fields of view
"""
def formatter(tick, pos):
return Utils.decdeg2dms(tick, pretty=True)
def add_line(description, position):
position = Utils.dms2decdeg(position)
plot.axhline(position, color='orange', linestyle='--', alpha=0.7)
plot.annotate(description, (-0.4, position + 0.03), alpha=0.7)
plot = self._plot(EquipmentTableLabels.FOV, 'Available fields of view', 'Used equipment', 'Field of view [°]',
**args)
plot.yaxis.set_major_formatter(plt.FuncFormatter(formatter))
# Pleiades width is 1°50'
add_line("Pleiades size", (1, 50, 0))
# Average moon size is 0°31'42"
add_line("Moon size", (0, 31, 42))
# M51 width is 0°11'
add_line("M51 size", (0, 11, 0))
def _plot(self, to_plot, title, x_label, y_label, autolayout=False, multiline_labels=True, **args):
data = self._filter_and_merge(to_plot, multiline_labels)
if autolayout:
plt.rcParams.update({'figure.autolayout': True})
ax = data.plot(kind='bar', title=title, stacked=True, **args)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend(loc='upper right')
return ax
def _filter_and_merge(self, to_plot, multiline_labels):
"""
This method filters the data to plot and merges the Eye and Image series together
"""
# Filter only relevant data - by to_plot key
data = self.data()[[to_plot, EquipmentTableLabels.TYPE, EquipmentTableLabels.LABEL]].sort_values(by=to_plot)
if (len(data) <= 8):
# Split label by ',' if multiline_labels is set to true
labels = [label.replace(',', '\n') if multiline_labels else label for label in
data[EquipmentTableLabels.LABEL].values]
else:
# For more than 8 options, display only IDs
labels = data.index
# Merge Image and Eye series together
return pd.DataFrame([{row[1]: row[0]} for row in data.values], index=labels)
def plot_connection_graph(self, **args):
# Connect all outputs with inputs
self._connect()
return ig.plot(self.connection_garph, margin=80, **args)
def plot_connection_graph_svg(self, **args):
surface = ca.ImageSurface(ca.FORMAT_ARGB32, 800, 600)
plot = self.plot_connection_graph(target=surface, **args)
return plot._repr_svg_()
def _connect(self):
logger.debug("Connecting nodes")
for out_node in self.connection_garph.vs.select(node_type=OpticalType.OUTPUT):
# Get output type
connection_type = out_node[NodeLabels.CONNECTION_TYPE]
for in_node in self.connection_garph.vs.select(node_type=OpticalType.INPUT, connection_type=connection_type):
                # Connect every output to every input, excluding connections from a part to itself
out_id = OpticalEqipment.get_parent_id(
out_node[NodeLabels.NAME])
in_id = OpticalEqipment.get_parent_id(in_node[NodeLabels.NAME])
if out_id != in_id:
self.add_edge(out_node, in_node)
def add_vertex(self, node_name, equipment=None, node_type=OpticalType.GENERIC, connection_type=None):
"""
Add single node to graph. Return new vertex.
"""
self.connection_garph.add_vertex(node_name, label_dist=1.5)
node = self.connection_garph.vs.find(name=node_name)
if equipment is not None:
node_type = equipment.type()
node_label = "\n".join([equipment.get_name(), equipment.label()])
elif node_type == OpticalType.GENERIC:
node_label = node_name
elif node_type == OpticalType.INPUT:
node_label = str(connection_type) + " " + OpticalEqipment.IN
elif node_type == OpticalType.OUTPUT:
node_label = str(connection_type) + " " + OpticalEqipment.OUT
else:
node_label = ""
node[NodeLabels.TYPE] = node_type
node[NodeLabels.LABEL] = node_label
node[NodeLabels.COLOR] = GraphConstants.COLORS[node_type]
node[NodeLabels.EQUIPMENT] = equipment
node[NodeLabels.CONNECTION_TYPE] = connection_type
return node
def add_edge(self, node_from, node_to):
        # Add the edge only if it doesn't already exist
if not self.connection_garph.are_connected(node_from, node_to):
self.connection_garph.add_edge(node_from, node_to)
def register(self, optical_eqipment):
"""
        Register any optical equipment in the optical graph.
"""
optical_eqipment.register(self)
| apache-2.0 |
Kamp9/scipy | scipy/signal/filter_design.py | 14 | 127885 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
# Small constant used by `group_delay` below to detect numerically singular
# denominators; assumed to match the module-level tolerance used elsewhere.
EPSILON = 2e-16
def findfreqs(num, den, N):
"""
Find an array of frequencies for computing the response of a filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator `b` and denominator `a` of a filter, compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator `b` and denominator `a` of a digital filter,
compute its frequency response::
        H(e^jw) = B(e^jw) / A(e^jw)
                = (b[0] + b[1]*e^(-jw) + ... + b[m]*e^(-jmw))
                  / (a[0] + a[1]*e^(-jw) + ... + a[n]*e^(-jnw))
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which h was computed, in radians/sample.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
unexpected results, this plots the real part of the complex transfer
function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
        D(w) = - d/dw [ arg H(e^jw) ]
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
    When such a case arises, a warning is raised and the group delay
    is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
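    Examples
    --------
    A small hand-worked sketch for a complex-conjugate pole pair (the result
    is described in a comment rather than shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.zpk2tf([], [-1+1j, -1-1j], 2.0)
    >>> # b ~ [2.], a ~ [1., 2., 2.], i.e. H(s) = 2 / (s**2 + 2*s + 2)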
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
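    Examples
    --------
    A small hand-worked sketch for a single section (the result is described
    in a comment rather than shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.sos2tf([[1., 1., 0., 1., -0.5, 0.]])
    >>> # b ~ [1., 1., 0.], a ~ [1., -0.5, 0.], i.e. H(z) = (z + 1)/(z - 0.5)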
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
filters. Although they can operate on analog filters, the results may
be sub-optimal.
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
1. If the pole is complex and the zero is the only remaining real
zero*, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
    The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). This means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
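    Examples
    --------
    A small hand-worked sketch (the result is described in a comment rather
    than shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.normalize([2, 4], [2, 8])
    >>> # b ~ [1., 2.], a ~ [1., 4.]: both polynomials are divided by a[0]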
"""
b, a = map(atleast_1d, (b, a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if len(b.shape) == 1:
b = asarray([b], b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(0, outb[:, 0], atol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(0, outb[:, 0], atol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:, 1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
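    Examples
    --------
    A small hand-worked sketch, moving the prototype ``H(s) = 1/(s + 1)`` to
    a cutoff of 10 rad/s (the result is described in a comment rather than
    shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.lp2lp([1.], [1., 1.], wo=10)
    >>> # b ~ [10.], a ~ [1., 10.], i.e. H(s) = 10 / (s + 10)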
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
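    Examples
    --------
    A small hand-worked sketch, turning the prototype ``H(s) = 1/(s + 1)``
    into a highpass with cutoff 10 rad/s (the result is described in a
    comment rather than shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.lp2hp([1.], [1., 1.], wo=10)
    >>> # b ~ [1., 0.], a ~ [1., 10.], i.e. H(s) = s / (s + 10)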
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
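    Examples
    --------
    A small hand-worked sketch, discretizing ``H(s) = 1/(s + 1)`` at
    ``fs = 1`` (values are approximate, hand-computed, and described in a
    comment rather than shown as verified doctest output):

    >>> from scipy import signal
    >>> b, a = signal.bilinear([1.], [1., 1.], fs=1.0)
    >>> # b ~ [0.333, 0.333], a ~ [1., -0.333]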
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
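    Examples
    --------
    A minimal usage sketch (no output values are claimed here); with the
    default ``ftype='ellip'`` and ``wp < ws`` this designs a lowpass
    elliptic filter meeting the given passband/stopband specification:

    >>> from scipy import signal
    >>> b, a = signal.iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40)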
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
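    For example, a unity-cutoff prototype with a single pole at ``s = -1``
    and gain ``k = 1`` (``H(s) = 1/(s + 1)``) maps to a pole at ``-wo`` with
    gain ``wo`` (``H(s) = wo/(s + wo)``); the ``wo**degree`` factor in the
    code compensates the gain change caused by scaling the poles and zeros
    (hand-worked illustration).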
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : ndarray
Zeros of the analog IIR filter transfer function.
p : ndarray
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
ellipord
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=False, output='ba'):
"""Bessel/Thomson digital and analog filter design.
Design an Nth order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Bessel filter, this is defined as the point at which the
asymptotes of the response are the same as a Butterworth filter of
the same order.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response.
As order increases, the Bessel filter approaches a Gaussian filter.
The digital Bessel filter is generated using the bilinear
transform, which does not preserve the phase response of the analog
filter. As such, it is only approximately correct at frequencies
below about fs/4. To get maximally flat group delay at higher
frequencies, the analog Bessel filter must be transformed using
phase-preserving techniques.
For a given `Wn`, the lowpass and highpass filter have the same phase vs
frequency curves; they are "phase-matched".
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the flat group delay and
the relationship to the Butterworth's cutoff frequency:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.plot(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter frequency response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel')
def maxflat():
    # Placeholder stub; not implemented in this version of the module.
    pass
def yulewalk():
    # Placeholder stub; not implemented in this version of the module.
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in stopband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
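# Editor's sketch (not part of the original module): band_stop_obj is consumed
# internally by the *ord functions below, which minimise it over one passband
# edge with scipy.optimize.fminbound.  The band-stop spec here is hypothetical
# and for illustration only:
#
# >>> import numpy as np
# >>> from scipy import optimize
# >>> passb, stopb = np.array([0.3, 2.3]), np.array([0.5, 2.0])  # rad/s, analog
# >>> wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
# ...                          args=(0, passb, stopb, 3, 40, 'butter'), disp=0)
# >>> # wp0 is the varied lower passband edge that minimises the filter order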
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
        The natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
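# Editor's usage sketch (illustrative only; assumes zpk2tf and freqs defined
# elsewhere in this module): the prototype poles returned by buttap lie on the
# unit circle in the left half-plane, and `butter` builds its filters from them.
#
# >>> z, p, k = buttap(4)
# >>> np.allclose(abs(p), 1.0)      # |p| == 1 for the analog prototype
# True
# >>> b, a = zpk2tf(z, p, k)        # numerator/denominator form, if preferred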
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
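# Editor's sketch (illustrative only; assumes zpk2tf and freqs from this
# module): the normalisation documented above can be checked directly — the
# prototype's gain at w = 1 rad/s is -rp dB.
#
# >>> z, p, k = cheb1ap(5, rp=1)
# >>> b, a = zpk2tf(z, p, k)
# >>> w, h = freqs(b, a, worN=[1.0])
# >>> 20 * np.log10(abs(h))         # ~ -1 dB at the normalised cutoff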
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
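# Editor's sketch (illustrative only; assumes zpk2tf and freqs from this
# module): the type II prototype is normalised so that the gain at w = 1 rad/s
# is -rs dB, i.e. the response just reaches the stopband floor there.
#
# >>> z, p, k = cheb2ap(4, rs=40)
# >>> b, a = zpk2tf(z, p, k)
# >>> w, h = freqs(b, a, worN=[1.0])
# >>> 20 * np.log10(abs(h))         # ~ -40 dB at the normalised stopband edge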
EPSILON = 2e-16
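# Editor's note: the two helpers below are the scalar objective functions used
# by ellipap's fmin/fminbound searches — _vratio drives sn(u|m)/cn(u|m) toward
# 1/eps, and _kratio drives the ratio of complete elliptic integrals
# K(m)/K(1-m) toward a target value.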
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
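# Editor's sketch (illustrative only; assumes zpk2tf and freqs from this
# module): as with the Chebyshev prototypes, the normalisation can be checked —
# the elliptic prototype's gain at w = 1 rad/s is -rp dB.
#
# >>> z, p, k = ellipap(4, rp=5, rs=40)
# >>> b, a = zpk2tf(z, p, k)
# >>> w, h = freqs(b, a, worN=[1.0])
# >>> 20 * np.log10(abs(h))         # ~ -5 dB at the normalised passband edge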
def besselap(N):
"""Return (z,p,k) for analog prototype of an Nth order Bessel filter.
The filter is normalized such that the filter asymptotes are the same as
a Butterworth filter of the same order with an angular (e.g. rad/s)
cutoff frequency of 1.
Parameters
----------
N : int
The order of the Bessel filter to return zeros, poles and gain for.
Values in the range 0-25 are supported.
Returns
-------
z : ndarray
        Zeros. Always an empty array.
p : ndarray
Poles.
k : scalar
Gain. Always 1.
"""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229 + .4999999999999999999999996j,
-.8660254037844386467637229 - .4999999999999999999999996j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907 - .7113666249728352680992154j,
-.7456403858480766441810907 + .7113666249728352680992154j]
elif N == 4:
p = [-.6572111716718829545787781 - .8301614350048733772399715j,
-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642637 - .2709187330038746636700923j,
-.9047587967882449459642624 + .2709187330038746636700926j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677 - .4427174639443327209850002j,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 - .9072067564574549539291747j,
-.5905759446119191779319432 + .9072067564574549539291747j]
elif N == 6:
p = [-.9093906830472271808050953 - .1856964396793046769246397j,
-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 - .5621717346937317988594118j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 - .9616876881954277199245657j,
-.5385526816693109683073792 + .9616876881954277199245657j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340 - .3216652762307739398381830j,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 - .6504696305522550699212995j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 - 1.002508508454420401230220j,
-.4966917256672316755024763 + 1.002508508454420401230220j]
elif N == 8:
p = [-.9096831546652910216327629 - .1412437976671422927888150j,
-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 - .4259017538272934994996429j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 - .7186517314108401705762571j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 - 1.034388681126901058116589j,
-.4621740412532122027072175 + 1.034388681126901058116589j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848 - .2526580934582164192308115j,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 - .5085815689631499483745341j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 - .7730546212691183706919682j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 - 1.060073670135929666774323j,
-.4331415561553618854685942 + 1.060073670135929666774323j]
elif N == 10:
p = [-.9091347320900502436826431 - .1139583137335511169927714j,
-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 - .3430008233766309973110589j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 - .5759147538499947070009852j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 - .8175836167191017226233947j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 - 1.081274842819124562037210j,
-.4083220732868861566219785 + 1.081274842819124562037210j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744 - .2080480375071031919692341j,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 - .4178696917801248292797448j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 - .6319150050721846494520941j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 - .8547813893314764631518509j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 - 1.099117466763120928733632j,
-.3868149510055090879155425 + 1.099117466763120928733632j]
elif N == 12:
p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
-.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
-.8802534342016826507901575 - .2871779503524226723615457j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 - .4810212115100676440620548j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 - .6792961178764694160048987j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 - .8863772751320727026622149j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 - 1.114373575641546257595657j,
-.3679640085526312839425808 + 1.114373575641546257595657j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718 - .1768342956161043620980863j,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 - .3547413731172988997754038j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 - .5350752120696801938272504j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 - .7199611890171304131266374j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 - .9135900338325109684927731j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 - 1.127591548317705678613239j,
-.3512792323389821669401925 + 1.127591548317705678613239j]
elif N == 14:
p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
-.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
-.8869506674916445312089167 - .2470079178765333183201435j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 - .4131653825102692595237260j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 - .5819170677377608590492434j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 - .7552857305042033418417492j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 - .9373043683516919569183099j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 - 1.139172297839859991370924j,
-.3363868224902037330610040 + 1.139172297839859991370924j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918 - .1537681197278439351298882j,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 - .3082352470564267657715883j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 - .4642348752734325631275134j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 - .6229396358758267198938604j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 - .7862895503722515897065645j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 - .9581787261092526478889345j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 - 1.149416154583629539665297j,
-.3229963059766444287113517 + 1.149416154583629539665297j]
elif N == 16:
p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
-.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
-.8911723070323647674780132 - .2167089659900576449410059j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 - .3621697271802065647661080j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 - .5092933751171800179676218j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 - .6591950877860393745845254j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 - .8137453537108761895522580j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 - .9767137477799090692947061j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 - 1.158552841199330479412225j,
-.3108782755645387813283867 + 1.158552841199330479412225j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844 - .1360267995173024591237303j,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 - .2725347156478803885651973j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 - .4100759282910021624185986j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 - .5493724405281088674296232j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 - .6914936286393609433305754j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 - .8382497252826992979368621j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 - .9932971956316781632345466j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 - 1.166761272925668786676672j,
-.2998489459990082015466971 + 1.166761272925668786676672j]
elif N == 18:
p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
-.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
-.8939764278132455733032155 - .1930374640894758606940586j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 - .3224204925163257604931634j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 - .4529385697815916950149364j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 - .5852778162086640620016316j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 - .7204696509726630531663123j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 - .8602708961893664447167418j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 - 1.008234300314801077034158j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 - 1.174183010600059128532230j,
-.2897592029880489845789953 + 1.174183010600059128532230j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536 - .1219568381872026517578164j,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 - .2442590757549818229026280j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 - .3672925896399872304734923j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 - .4915365035562459055630005j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 - .6176483917970178919174173j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 - .7466272357947761283262338j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 - .8801817131014566284786759j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 - 1.021768776912671221830298j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 - 1.180931628453291873626003j,
-.2804866851439370027628724 + 1.180931628453291873626003j]
elif N == 20:
p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
-.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
-.8959150941925768608568248 - .1740317175918705058595844j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 - .2905559296567908031706902j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 - .4078917326291934082132821j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 - .5264942388817132427317659j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 - .6469975237605228320268752j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 - .7703721701100763015154510j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 - .8982829066468255593407161j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 - 1.034097702560842962315411j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 - 1.187099379810885886139638j,
-.2719299580251652601727704 + 1.187099379810885886139638j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083 - .1105252572789856480992275j,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 - .2213069215084350419975358j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 - .3326258512522187083009453j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 - .4448177739407956609694059j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 - .5583186348022854707564856j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 - .6737426063024382240549898j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 - .7920349342629491368548074j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 - .9148198405846724121600860j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 - 1.045382255856986531461592j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 - 1.192762031948052470183960j,
-.2640041595834031147954813 + 1.192762031948052470183960j]
elif N == 22:
p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
-.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
-.8972983138153530955952835 - .1584351912289865608659759j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 - .2644363039201535049656450j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 - .3710389319482319823405321j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 - .4785619492202780899653575j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 - .5874255426351153211965601j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 - .6982266265924524000098548j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 - .8118875040246347267248508j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 - .9299947824439872998916657j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 - 1.055755605227545931204656j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 - 1.197982433555213008346532j,
-.2566376987939318038016012 + 1.197982433555213008346532j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993 - .1010534335314045013252480j,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 - .2023024699381223418195228j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 - .3039581993950041588888925j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 - .4062657948237602726779246j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 - .5095305912227258268309528j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 - .6141594859476032127216463j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 - .7207341374753046970247055j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 - .8301558302812980678845563j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 - .9439760364018300083750242j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 - 1.065328794475513585531053j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 - 1.202813187870697831365338j,
-.2497697202208956030229911 + 1.202813187870697831365338j]
elif N == 24:
p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
-.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
-.8983105104397872954053307 - .1454056133873610120105857j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 - .2426335234401383076544239j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 - .3403202112618624773397257j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 - .4386985933597305434577492j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 - .5380628490968016700338001j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 - .6388084216222567930378296j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 - .7415032695091650806797753j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 - .8470292433077202380020454j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 - .9569048385259054576937721j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 - 1.074195196518674765143729j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 - 1.207298683731972524975429j,
-.2433481337524869675825448 + 1.207298683731972524975429j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
-.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
-.8928551459883548836774529 - .1863068969804300712287138j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 - .2798521321771408719327250j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 - .3738977875907595009446142j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 - .4686668574656966589020580j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 - .5644441210349710332887354j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 - .6616149647357748681460822j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 - .7607348858167839877987008j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 - .8626676330388028512598538j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 - .9689006305344868494672405j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 - 1.082433927173831581956863j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 - 1.211476658382565356579418j,
-.2373280669322028974199184 + 1.211476658382565356579418j]
else:
raise ValueError("Bessel Filter not supported for order %s" % N)
return asarray(z), asarray(p), k
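# Editor's note: the long if/elif ladder above hard-codes high-precision pole
# locations for Bessel prototypes of order 0-25; `bessel` feeds them through
# iirfilter.  Quick illustrative check:
#
# >>> z, p, k = besselap(5)
# >>> len(z), len(p), k
# (0, 5, 1)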
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
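# Editor's sketch of how these lookup tables are consumed (illustrative only —
# the real dispatch happens inside iirfilter, referenced by the design
# functions above): `ftype` strings map to (prototype, order-selection)
# function pairs, and `btype` strings are normalised to canonical band names.
#
# >>> filter_dict['butterworth'][0] is buttap
# True
# >>> band_dict['bp']
# 'bandpass'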
| bsd-3-clause |
wdbm/pyprel | setup.py | 1 | 1315 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import setuptools
def main():
setuptools.setup(
name = "pyprel",
version = "2018.09.14.1501",
description = "Python print elegant",
long_description = long_description(),
url = "https://github.com/wdbm/pyprel",
author = "Will Breaden Madden",
author_email = "[email protected]",
license = "GPLv3",
packages = setuptools.find_packages(),
install_requires = [
"numpy",
"pandas",
"Pillow",
"pyfiglet",
"shijian"
],
include_package_data = True,
zip_safe = False
)
def long_description(filename = "README.md"):
if os.path.isfile(os.path.expandvars(filename)):
try:
import pypandoc
long_description = pypandoc.convert_file(filename, "rst")
except ImportError:
long_description = open(filename).read()
else:
long_description = ""
return long_description
if __name__ == "__main__":
main()
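# Editor's note (not part of the original file): typical installation sketch —
#
#     pip install .        # from a local checkout of the repository
#     pip install pyprel   # or from PyPI, if the package is published there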
| gpl-3.0 |
legacysurvey/legacypipe | doc/conf.py | 2 | 9150 | # -*- coding: utf-8 -*-
# As per http://blog.rtwilson.com/how-to-make-your-sphinx-documentation-compile-with-readthedocs-when-youre-using-numpy-and-scipy/ :
import sys, os
from unittest.mock import MagicMock as Mock
MOCK_MODULES = [
'fitsio',
'astrometry',
'astrometry.util', 'astrometry.util.fits', 'astrometry.util.plotutils',
'astrometry.util.resample', 'astrometry.util.ttime',
'astrometry.util.starutil_numpy', 'astrometry.util.miscutils',
'astrometry.util.util', 'astrometry.util.multiproc',
'astrometry.util.file', 'astrometry.util.timingpool',
'astrometry.libkd',
'tractor',
'tractor.ellipses', 'tractor.galaxy', 'tractor.utils',
'tractor.basics', 'tractor.image', 'tractor.ceres_mixin',
'tractor.engine', 'tractor.psfex', 'tractor.sfd', 'tractor.tractortime',
'tractor.sersic', 'tractor.sky', 'tractor.splinesky', 'tractor.patch',
'numpy',
'scipy',
'scipy.interpolate', 'scipy.ndimage', 'scipy.ndimage.filters',
'scipy.ndimage.measurements', 'scipy.ndimage.morphology',
'matplotlib',
'matplotlib.pyplot',
'pylab',
'astropy',
'astropy.time', 'astropy.io',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
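# Editor's note: substituting MagicMock for the heavy compiled dependencies
# above lets Sphinx autodoc import the legacypipe modules on Read the Docs
# without building fitsio, astrometry.net, the Tractor, numpy/scipy or
# matplotlib; attribute access on the mocked modules just returns another Mock.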
# legacypipe documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 10 15:38:03 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../py'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'numpydoc']
#'sphinx.ext.pngmath', 'sphinx.ext.mathjax',
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'legacypipe'
copyright = u'2015--2020, LegacySurvey.org'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'legacypipedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
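# For example (a hypothetical entry loading an extra LaTeX package):
#'preamble': r'\usepackage{amsmath}',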
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'legacypipe.tex', u'legacypipe Documentation',
u'LegacySurvey.org', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'legacypipe', u'legacypipe Documentation',
[u'LegacySurvey.org'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'legacypipe', u'legacypipe Documentation',
   u'LegacySurvey.org', 'legacypipe', 'Image reduction pipeline for the Legacy Surveys.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
thientu/scikit-learn | sklearn/utils/tests/test_extmath.py | 70 | 16531 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
clipo/idss-seriation | analysis/optimize-geographic-pvalue/opt-geog-bootstrap-original.py | 1 | 3919 | #!/usr/bin/env python
# Copyright (c) 2015. Mark E. Madsen <[email protected]>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import pickle
import math
import random as rnd
import time
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot/pylab are imported
import matplotlib.pyplot as plt
import scipy as sp
import scipy.stats
from pylab import *
import pprint as pp
def setup():
self_xassem = pickle.load(open("xassem.pickle",'rb'))
self_yassem = pickle.load(open("yassem.pickle",'rb'))
mmg = pickle.load(open("mmg.pickle",'rb'))
labels = pickle.load(open("labels.pickle", 'rb'))
plot_filename = "test_plot.png"
return (mmg,self_xassem,self_yassem,plot_filename,labels)
def calculateGeographicSolutionPValue(graph,num_bootstrap,self_xassem,self_yassem,plot_filename,labels):
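    # What the code below does:
    #   1) sum the Euclidean distances between the assemblages joined by each
    #      edge of the solution graph (the observed "solution distance"),
    #   2) for each bootstrap iteration, draw random pairs of distinct
    #      assemblages and sum their distances,
    #   3) the p-value is the fraction of bootstrap sums that are <= the
    #      observed solution distance; a histogram of the bootstrap sums is
    #      also written to `plot_filename`.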
solutionDistance = 0
assemblagesInSolution = []
edges = 0
for e in graph.edges_iter():
d = graph.get_edge_data(*e)
edges += 1
fromAssemblage = e[0]
toAssemblage = e[1]
solutionDistance += math.sqrt(
pow((int(self_xassem[fromAssemblage]) - int(self_xassem[toAssemblage])), 2)
+ pow((int(self_yassem[fromAssemblage]) - int(self_yassem[toAssemblage])), 2))
assemblagesInSolution.append(fromAssemblage)
assemblagesInSolution.append(toAssemblage)
assemblageSet = set(assemblagesInSolution)
#print "solution Distance: %s" % solutionDistance
rnd.seed() # uses system time to initialize random number generator, or you can pass in a deterministic seed as an argument if you want
x = []
pvalueScore = 0.000
for b in range(0, num_bootstrap):
# code to use to generate K pairs
list1 = labels
list2 = labels
testDistance = 0
for p in range(0, edges - 1):
test = False
p1 = p2 = ""
while test is False:
p1 = rnd.choice(list1)
p2 = rnd.choice(list2)
if p1 != p2:
test = True
#print "Pair: ", p1, "-", p2
testDistance += math.sqrt(pow((int(self_xassem[p1]) - int(self_xassem[p2])), 2)
+ pow((int(self_yassem[p1]) - int(self_yassem[p2])), 2))
#print "Test Distance: ", testDistance
if testDistance <= solutionDistance:
# print "TEST is less than solutionDistance: ",testDistance
pvalueScore += 1
x.append(testDistance)
filename = plot_filename
f = plt.figure(filename, figsize=(8, 8))
plt.rcParams['font.family'] = 'sans-serif'
# f=plt.figure("Geographic Distance", figsize=(8, 8))
num_bins = 20
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, facecolor='green', alpha=0.5)
plt.axvline(solutionDistance, color='r', linestyle='dashed', linewidth=2)
figure_label = plot_filename[0:-4]
plt.xlabel(figure_label)
plt.ylabel('Count')
plt.title(r'Histogram of Summed Geographic Distance')
plt.savefig(filename, dpi=75)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
minx = min(x)
maxx = max(x)
pvalue = pvalueScore / num_bootstrap
x1, x2, y1, y2 = plt.axis()
text = "p-value: " + str(pvalue)
plt.text(maxx / 3, (y2 - y1) * 2 / 3, text, style='italic')
if pvalue == 0:
pvalue = "0.000"
return pvalue, solutionDistance, mean(x), std(x)
if __name__ == "__main__":
(mmg,self_xassem,self_yassem,plot_filename,labels) = setup()
num_bootstraps = 1000
start_time = time.clock()
calculateGeographicSolutionPValue(mmg, num_bootstraps,self_xassem,self_yassem,plot_filename,labels)
end_time = time.clock()
elapsed = end_time - start_time
print "elapsed with %s bootstraps: %s secs" % (num_bootstraps, elapsed) | apache-2.0 |
mikedelong/aarhus | demos/matrix_factorization.py | 1 | 2679 | import cPickle as pickle
import json
import logging
import os
import numpy
import sklearn.feature_extraction.text as text
from sklearn import decomposition
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
with open('./matrix_factorization_input.json') as data_file:
data = json.load(data_file)
logging.debug(data)
input_folder = data['input_folder']
pickle_file_name = data['pickle_file_name']
max_file_count = data['max_file_count']
topics_count = data['topics_count']
top_words_count = data['top_words_count']
file_names = [os.path.join(root, current) for root, subdirectories, files in os.walk(input_folder) for current in files]
# truncate
if max_file_count < len(file_names) and max_file_count != -1:
file_names = file_names[:max_file_count]
logging.debug('we are working with %d files.' % len(file_names))
# todo what is min_df
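# Note: an integer min_df is an absolute document-count cutoff, so min_df=20
# below tells CountVectorizer to ignore terms appearing in fewer than 20 files.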
vectorizer = text.CountVectorizer(input='filename', stop_words='english', min_df=20, decode_error='ignore')
logging.debug('created vectorizer')
dtm = vectorizer.fit_transform(file_names).toarray()
logging.debug('created matrix')
vocabulary = numpy.array(vectorizer.get_feature_names())
logging.debug('matrix shape: %s, vocabulary size: %d', dtm.shape, len(vocabulary))
clf = decomposition.NMF(n_components=topics_count, random_state=0)
logging.debug('decomposition complete.')
doctopic = clf.fit_transform(dtm)
logging.debug('fit-transform complete.')
topic_words = []
for topic in clf.components_:
word_idx = numpy.argsort(topic)[::-1][0:top_words_count]
topic_words.append([vocabulary[word] for word in word_idx])
doctopic /= numpy.sum(doctopic, axis=1, keepdims=True)
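# Normalize each document's topic weights to sum to 1, turning the raw NMF
# scores into per-document topic proportions.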
names = []
for file_name in file_names:
basename = os.path.basename(file_name)
names.append(basename)
names = numpy.asarray(names)
doctopic_orig = doctopic.copy()
groups_count = len(set(names))
doctopic_grouped = numpy.zeros((groups_count, topics_count))
for i, name in enumerate(sorted(set(names))):
doctopic_grouped[i, :] = numpy.mean(doctopic[names == name, :], axis=0)
doctopic = doctopic_grouped
out_pickle = {
'doctopic' : doctopic,
'topic_words' : topic_words
}
pickle.dump(out_pickle, open( pickle_file_name, 'wb' ))
logging.debug('pickle file written.')
t0 = sorted(set(names))
logging.info("Top NMF topics in...")
for i in range(len(doctopic)):
top_topics = numpy.argsort(doctopic[i, :])[::-1][0:3]
top_topics_str = ' '.join(str(t) for t in top_topics)
# logging.info("{}: {}".format(names[i], top_topics_str))
for t in range(len(topic_words)):
logging.info("Topic {}: {}".format(t, ' '.join(topic_words[t][:top_words_count])))
| apache-2.0 |
ericmjl/hiv-resistance-prediction | util/protein_df.py | 1 | 2138 | import pandas as pd
class ProteinDataFrame(object):
"""docstring for ProteinDataFrame"""
def __init__(self, handle):
super(ProteinDataFrame, self).__init__()
self.handle = handle
self.protein_df = self.parse_pdb()
self.alpha_carbons = self.extract_alpha_carbons()
def parse_pdb(self):
"""
Parses the PDB file as a pandas DataFrame object.
Backbone chain atoms are ignored for the calculation
of interacting residues.
"""
atomic_data = []
        with open(self.handle, 'r') as f:
for line in f.readlines():
data = dict()
if line[0:4] == 'ATOM':
data['Record name'] = line[0:5].strip(' ')
data['serial_number'] = int(line[6:11].strip(' '))
data['atom'] = line[12:15].strip(' ')
data['resi_name'] = line[17:20]
data['chain_id'] = line[21]
data['resi_num'] = int(line[23:26])
data['x'] = float(line[30:37])
data['y'] = float(line[38:45])
data['z'] = float(line[46:53])
atomic_data.append(data)
atomic_df = pd.DataFrame(atomic_data)
        return atomic_df
def extract_alpha_carbons(self):
c_alpha = self.protein_df[self.protein_df['atom'] == 'CA']
c_alpha.reset_index(drop=True, inplace=True)
c_alpha = c_alpha[c_alpha['chain_id'] == 'A']
        self.alpha_carbons = c_alpha
        return c_alpha
def mutate_position(self, position, new_resi):
"""
Parameters:
===========
- position: (int) the amino acid position. Begins at 1, not 0.
- new_resi: (str) the 3-letter amino acid to mutate to.
"""
assert isinstance(new_resi, str), 'new_resi must be a string'
assert isinstance(position, int), 'position must be an integer'
assert len(new_resi) == 3, 'new_resi must be a 3-letter string'
        mutated = self.alpha_carbons.copy()
mutated.ix[position - 1, 'resi_name'] = new_resi
return mutated
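# Example usage (hypothetical file name; assumes a conventional single-model
# PDB file containing a chain 'A'):
#   pdf = ProteinDataFrame('protease.pdb')
#   mutant_ca = pdf.mutate_position(30, 'ASN')  # alpha-carbon table with
#                                               # residue 30 renamed to ASN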
| mit |
ch3ll0v3k/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
dgwakeman/mne-python | mne/viz/tests/test_topo.py | 7 | 4728 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from collections import namedtuple
import numpy as np
from numpy.testing import assert_raises
from mne import io, read_events, Epochs
from mne import pick_channels_evoked
from mne.channels import read_layout
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo, plot_topo_image_epochs, _get_presser,
mne_analyze_colormap)
from mne.viz.topo import _plot_update_evoked_topo
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 340, 341, 342] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_topo():
"""Test plotting of ERP topography
"""
import matplotlib.pyplot as plt
# Show topography
evoked = _get_epochs().average()
plot_topo(evoked) # should auto-find layout
warnings.simplefilter('always', UserWarning)
picked_evoked = evoked.pick_channels(evoked.ch_names[:3], copy=True)
picked_evoked_eeg = evoked.pick_types(meg=False, eeg=True, copy=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
with warnings.catch_warnings(record=True):
for ylim in [dict(mag=[-600, 600]), None]:
plot_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
assert_raises(ValueError, plot_topo, evo, layout, color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_topo(picked_evoked_delayed_ssp, layout, proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', 'inaxes')
func(event(inaxes=fig.axes[0]))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
_plot_update_evoked_topo(params, bools)
# should auto-generate layout
plot_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True)
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography
"""
import matplotlib.pyplot as plt
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
cmap = mne_analyze_colormap(format='matplotlib')
plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data
"""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
tfr.plot_topo(baseline=(None, 0), mode='ratio', title='Average power',
vmin=0., vmax=14., show=False)
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
run_tests_if_main()
| bsd-3-clause |
hyperspy/hyperspy | hyperspy/drawing/widget.py | 1 | 38254 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.backend_bases import MouseEvent
import numpy as np
from hyperspy.drawing.utils import on_figure_window_close
from hyperspy.events import Events, Event
from hyperspy.defaults_parser import preferences
class WidgetBase(object):
"""Base class for interactive widgets/patches. A widget creates and
maintains one or more matplotlib patches, and manages the interaction code
    so that the user can manipulate it on the fly.
This base class implements functionality which is common to all such
widgets, mainly the code that manages the patch, axes management, and
sets up common events ('changed' and 'closed').
    Any inheriting subclasses must implement the following methods:
_set_patch(self)
_on_navigate(obj, name, old, new) # Only for widgets that can navigate
It should also make sure to fill the 'axes' attribute as early as
possible (but after the base class init), so that it is available when
needed.
"""
def __init__(self, axes_manager=None, color='red', alpha=1.0, **kwargs):
self.axes_manager = axes_manager
self._axes = list()
self.ax = None
self.picked = False
self.selected = False
self._selected_artist = None
self._size = 1.
self._pos = np.array([0.])
self._is_on = True
self.background = None
self.patch = []
self.color = color
self.alpha = alpha
self.cids = list()
self.blit = None
self.events = Events()
self.events.changed = Event(doc="""
Event that triggers when the widget has a significant change.
The event triggers after the internal state of the widget has been
updated.
Arguments:
----------
widget:
The widget that changed
""", arguments=['obj'])
self.events.closed = Event(doc="""
Event that triggers when the widget closed.
The event triggers after the widget has already been closed.
Arguments:
----------
widget:
The widget that closed
""", arguments=['obj'])
self._navigating = False
super(WidgetBase, self).__init__(**kwargs)
def _get_axes(self):
return self._axes
def _set_axes(self, axes):
if axes is None:
self._axes = list()
else:
self._axes = axes
axes = property(lambda s: s._get_axes(),
lambda s, v: s._set_axes(v))
@property
def is_on(self):
"""Determines if the widget is set to draw if valid (turned on).
"""
return self._is_on
def set_on(self, value, render_figure=True):
"""Change the on state of the widget. If turning off, all patches will
be removed from the matplotlib axes and the widget will disconnect from
all events. If turning on, the patch(es) will be added to the
matplotlib axes, and the widget will connect to its default events.
"""
did_something = False
if value is not self.is_on and self.ax is not None:
did_something = True
if value is True:
self._add_patch_to(self.ax)
self.connect(self.ax)
elif value is False:
for container in [
self.ax.patches,
self.ax.lines,
self.ax.artists,
self.ax.texts]:
for p in self.patch:
if p in container:
container.remove(p)
self.disconnect()
if hasattr(super(WidgetBase, self), 'set_on'):
super(WidgetBase, self).set_on(value)
if did_something:
if render_figure:
self.draw_patch()
if value is False:
self.ax = None
self._is_on = value
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = color
for p in self.patch:
p.set_color(self._color)
@property
def alpha(self):
return self._alpha
@alpha.setter
def alpha(self, alpha):
self._alpha = alpha
for p in self.patch:
p.set_alpha(self._alpha)
def _set_patch(self):
"""Create the matplotlib patch(es), and store it in self.patch
"""
if hasattr(super(WidgetBase, self), '_set_patch'):
super(WidgetBase, self)._set_patch()
# Must be provided by the subclass
def _add_patch_to(self, ax):
"""Create and add the matplotlib patches to 'ax'
"""
self.blit = hasattr(ax, 'hspy_fig') and ax.figure.canvas.supports_blit
self._set_patch()
for p in self.patch:
ax.add_artist(p)
p.set_animated(self.blit)
if hasattr(super(WidgetBase, self), '_add_patch_to'):
super(WidgetBase, self)._add_patch_to(ax)
def set_mpl_ax(self, ax):
"""Set the matplotlib Axes that the widget will draw to. If the widget
on state is True, it will also add the patch to the Axes, and connect
to its default events.
"""
if ax is self.ax:
return # Do nothing
# Disconnect from previous axes if set
if self.ax is not None and self.is_on:
self.disconnect()
self.ax = ax
if self.is_on is True:
self._add_patch_to(ax)
self.connect(ax)
ax.figure.canvas.draw_idle()
self.select()
def select(self):
"""
Cause this widget to be the selected widget in its MPL axes. This
assumes that the widget has its patch added to the MPL axes.
"""
if not self.patch or not self.is_on or not self.ax:
return
canvas = self.ax.figure.canvas
# Simulate a pick event
x, y = self.patch[0].get_transform().transform_point((0, 0))
mouseevent = MouseEvent('pick_event', canvas, x, y)
        # when the widget is added programmatically, mouseevent can be "empty"
if mouseevent.button:
canvas.pick_event(mouseevent, self.patch[0])
self.picked = False
def connect(self, ax):
"""Connect to the matplotlib Axes' events.
"""
on_figure_window_close(ax.figure, self.close)
if self._navigating:
self.connect_navigate()
def connect_navigate(self):
"""Connect to the axes_manager such that changes in the widget or in
the axes_manager are reflected in the other.
"""
if self._navigating:
self.disconnect_navigate()
self.axes_manager.events.indices_changed.connect(
self._on_navigate, {'obj': 'axes_manager'})
self._on_navigate(self.axes_manager) # Update our position
self._navigating = True
def disconnect_navigate(self):
"""Disconnect a previous naivgation connection.
"""
self.axes_manager.events.indices_changed.disconnect(self._on_navigate)
self._navigating = False
def _on_navigate(self, axes_manager):
"""Callback for axes_manager's change notification.
"""
pass # Implement in subclass!
def disconnect(self):
"""Disconnect from all events (both matplotlib and navigation).
"""
for cid in self.cids:
try:
self.ax.figure.canvas.mpl_disconnect(cid)
except BaseException:
pass
if self._navigating:
self.disconnect_navigate()
def close(self, window=None, render_figure=False):
"""Set the on state to off (removes patch and disconnects), and trigger
events.closed.
"""
self.set_on(False, render_figure=render_figure)
self.events.closed.trigger(obj=self)
def draw_patch(self, *args):
"""Update the patch drawing.
"""
try:
if hasattr(self.ax, 'hspy_fig'):
self.ax.hspy_fig.render_figure()
elif self.ax.figure is not None:
self.ax.figure.canvas.draw_idle()
except AttributeError:
pass # When figure is None, typically when closing
def _v2i(self, axis, v):
"""Wrapped version of DataAxis.value2index, which bounds the index
between axis.low_index and axis.high_index+1, and does not raise a
ValueError.
"""
try:
return axis.value2index(v)
except ValueError:
if v > axis.high_value:
return axis.high_index + 1
elif v < axis.low_value:
return axis.low_index
else:
raise
def _i2v(self, axis, i):
"""Wrapped version of DataAxis.index2value, which bounds the value
        between axis.low_value and axis.high_value + axis.scale when the axis
is uniform and does not raise a ValueError.
"""
try:
return axis.index2value(i)
except ValueError:
if i > axis.high_index:
if axis.is_uniform:
return axis.high_value + axis.scale
else:
return axis.high_value
elif i < axis.low_index:
return axis.low_value
else:
raise
def __str__(self):
return "{} with id {}".format(self.__class__.__name__, id(self))
class DraggableWidgetBase(WidgetBase):
"""Adds the `position` and `indices` properties, and adds a framework for
letting the user drag the patch around. Also adds the `moved` event.
The default behavior is that `position` snaps to the values corresponding
to the values of the axes grid (i.e. no subpixel values). This behavior
can be controlled by the property `snap_position`.
Any inheritors must override these methods:
_onmousemove(self, event)
_update_patch_position(self)
_set_patch(self)
"""
def __init__(self, axes_manager, **kwargs):
super(DraggableWidgetBase, self).__init__(axes_manager, **kwargs)
self.events.moved = Event(doc="""
Event that triggers when the widget was moved.
The event triggers after the internal state of the widget has been
updated. This event does not differentiate on how the position of
the widget was changed, so it is the responsibility of the user
            to suppress events as necessary to avoid closed loops etc.
Arguments:
----------
obj:
The widget that was moved.
""", arguments=['obj'])
self._snap_position = True
# Set default axes
if self.axes_manager is not None:
if self.axes_manager.navigation_dimension > 0:
self.axes = self.axes_manager.navigation_axes[0:1]
else:
self.axes = self.axes_manager.signal_axes[0:1]
else:
self._pos = np.array([0.])
def _set_axes(self, axes):
super(DraggableWidgetBase, self)._set_axes(axes)
if self.axes:
self._pos = np.array([ax.low_value for ax in self.axes])
def _get_indices(self):
"""Returns a tuple with the position (indices).
"""
idx = []
for i in range(len(self.axes)):
idx.append(self.axes[i].value2index(self._pos[i]))
return tuple(idx)
def _set_indices(self, value):
"""Sets the position of the widget (by indices). The dimensions should
correspond to that of the 'axes' attribute. Calls _pos_changed if the
value has changed, which is then responsible for triggering any
relevant events.
"""
if np.ndim(value) == 0 and len(self.axes) == 1:
self.position = [self.axes[0].index2value(value)]
elif len(self.axes) != len(value):
raise ValueError()
else:
p = []
for i in range(len(self.axes)):
p.append(self.axes[i].index2value(value[i]))
self.position = p
indices = property(lambda s: s._get_indices(),
lambda s, v: s._set_indices(v))
def _pos_changed(self):
"""Call when the position of the widget has changed. It triggers the
relevant events, and updates the patch position.
"""
if self._navigating:
with self.axes_manager.events.indices_changed.suppress_callback(
self._on_navigate):
for i in range(len(self.axes)):
self.axes[i].value = self._pos[i]
self.events.moved.trigger(self)
self.events.changed.trigger(self)
self._update_patch_position()
def _validate_pos(self, pos):
"""Validates the passed position. Depending on the position and the
implementation, this can either fire a ValueError, or return a modified
position that has valid values. Or simply return the unmodified
position if everything is ok.
This default implementation bounds the position within the axes limits.
"""
if len(pos) != len(self.axes):
raise ValueError()
pos = np.maximum(pos, [ax.low_value for ax in self.axes])
pos = np.minimum(pos, [ax.high_value for ax in self.axes])
if self.snap_position:
pos = self._do_snap_position(pos)
return pos
def _get_position(self):
"""Provides the position of the widget (by values) in a tuple.
"""
return tuple(
self._pos.tolist()) # Don't pass reference, and make it clear
def _set_position(self, position):
"""Sets the position of the widget (by values). The dimensions should
correspond to that of the 'axes' attribute. Calls _pos_changed if the
value has changed, which is then responsible for triggering any
relevant events.
"""
position = self._validate_pos(position)
if np.any(self._pos != position):
self._pos = np.array(position)
self._pos_changed()
position = property(lambda s: s._get_position(),
lambda s, v: s._set_position(v))
def _do_snap_position(self, value=None):
"""Snaps position to axes grid. Returns snapped value. If value is
passed as an argument, the internal state is left untouched, if not
the position attribute is updated to the snapped value.
"""
value = np.array(value) if value is not None else self._pos
for i, ax in enumerate(self.axes):
value[i] = ax.index2value(ax.value2index(value[i]))
return value
def _set_snap_position(self, value):
self._snap_position = value
if value:
snap_value = self._do_snap_position(self._pos)
if np.any(self._pos != snap_value):
self._pos = snap_value
self._pos_changed()
snap_position = property(lambda s: s._snap_position,
lambda s, v: s._set_snap_position(v))
def connect(self, ax):
super(DraggableWidgetBase, self).connect(ax)
canvas = ax.figure.canvas
self.cids.append(
canvas.mpl_connect('motion_notify_event', self._onmousemove))
self.cids.append(canvas.mpl_connect('pick_event', self.onpick))
self.cids.append(canvas.mpl_connect(
'button_release_event', self.button_release))
def _on_navigate(self, axes_manager):
if axes_manager is self.axes_manager:
p = self._pos.tolist()
for i, a in enumerate(self.axes):
p[i] = a.value
self.position = p # Use property to trigger events
def onpick(self, event):
# Callback for MPL pick event
self.picked = (event.artist in self.patch)
self._selected_artist = event.artist
if hasattr(super(DraggableWidgetBase, self), 'onpick'):
super(DraggableWidgetBase, self).onpick(event)
self.selected = self.picked
def _onmousemove(self, event):
"""Callback for mouse movement. For dragging, the implementor would
normally check that the widget is picked, and that the event.inaxes
Axes equals self.ax.
"""
# This method must be provided by the subclass
pass
def _update_patch_position(self):
"""Updates the position of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def _update_patch_geometry(self):
"""Updates all geometrical properties of the patch on the plot.
"""
self._update_patch_position()
def button_release(self, event):
"""whenever a mouse button is released."""
if event.button != 1:
return
if self.picked is True:
self.picked = False
class Widget1DBase(DraggableWidgetBase):
"""A base class for 1D widgets.
    It constrains `position` to a single value on the first axis in `axes` and
    bounds it to that axis' limits. Other than that, it mainly supplies common
    utility functions for inheritors.
"""
def _set_position(self, position):
try:
len(position)
except TypeError:
position = (position,)
super(Widget1DBase, self)._set_position(position)
def _validate_pos(self, pos):
pos = np.maximum(pos, self.axes[0].low_value)
pos = np.minimum(pos, self.axes[0].high_value)
return super(Widget1DBase, self)._validate_pos(pos)
class ResizableDraggableWidgetBase(DraggableWidgetBase):
"""Adds the `size` property and get_size_in_axes method, and adds a
framework for letting the user resize the patch, including resizing by
key strokes ('+', '-'). Also adds the 'resized' event.
Utility functions for resizing are implemented by `increase_size` and
`decrease_size`, which will in-/decrement the size by 1. Other utility
functions include `get_centre` and `get_centre_indices` which returns the
center position, and the internal _apply_changes which helps make sure that
only one 'changed' event is fired for a combined move and resize.
Any inheritors must override these methods:
_update_patch_position(self)
_update_patch_size(self)
_update_patch_geometry(self)
_set_patch(self)
"""
def __init__(self, axes_manager, **kwargs):
super(ResizableDraggableWidgetBase, self).__init__(
axes_manager, **kwargs)
if not self.axes:
self._size = np.array([1])
self.size_step = 1 # = one step in index space
self._snap_size = True
self.events.resized = Event(doc="""
Event that triggers when the widget was resized.
The event triggers after the internal state of the widget has been
updated. This event does not differentiate on how the size of
the widget was changed, so it is the responsibility of the user
            to suppress events as necessary to avoid closed loops etc.
Arguments:
----------
obj:
The widget that was resized.
""", arguments=['obj'])
self.no_events_while_dragging = False
self._drag_store = None
def _set_axes(self, axes):
super(ResizableDraggableWidgetBase, self)._set_axes(axes)
if self.axes:
self._size = np.array([self._get_step(ax) for ax in self.axes])
def _get_step(self, axis):
        # TODO: need to check if this is working fine, particularly with
        # non-uniform axes.
        """Used to determine the size of the widget, with support for
        non-uniform axes.
        """
if axis.index >= axis.size - 1:
return axis.index2value(axis.index) - axis.index2value(axis.index - 1)
else:
return axis.index2value(axis.index + 1) - axis.index2value(axis.index)
def _get_size(self):
"""Getter for 'size' property. Returns the size as a tuple (to prevent
unintended in-place changes).
"""
return tuple(self._size.tolist())
def _set_size(self, value):
"""Setter for the 'size' property.
Calls _size_changed to handle size change, if the value has changed.
"""
value = np.minimum(value, [ax.size * ax.scale for ax in self.axes])
value = np.maximum(value,
self.size_step * [ax.scale for ax in self.axes])
if self.snap_size:
value = self._do_snap_size(value)
if np.any(self._size != value):
self._size = value
self._size_changed()
size = property(lambda s: s._get_size(), lambda s, v: s._set_size(v))
def _do_snap_size(self, value=None):
value = np.array(value) if value is not None else self._size
for i, ax in enumerate(self.axes):
value[i] = round(value[i] / ax.scale) * ax.scale
return value
def _set_snap_size(self, value):
self._snap_size = value
if value:
snap_value = self._do_snap_size(self._size)
if np.any(self._size != snap_value):
self._size = snap_value
self._size_changed()
snap_size = property(lambda s: s._snap_size,
lambda s, v: s._set_snap_size(v))
def _set_snap_all(self, value):
# Snap position first, as snapped size can depend on position.
self.snap_position = value
self.snap_size = value
snap_all = property(lambda s: s.snap_size and s.snap_position,
lambda s, v: s._set_snap_all(v))
def increase_size(self):
"""Increment all sizes by 1. Applied via 'size' property.
"""
self.size = np.array(self.size) + \
self.size_step * np.array([a.scale for a in self.axes])
def decrease_size(self):
"""Decrement all sizes by 1. Applied via 'size' property.
"""
self.size = np.array(self.size) - \
self.size_step * np.array([a.scale for a in self.axes])
def _size_changed(self):
"""Triggers resize and changed events, and updates the patch.
"""
self.events.resized.trigger(self)
self.events.changed.trigger(self)
self._update_patch_size()
def get_size_in_indices(self):
"""Gets the size property converted to the index space (via 'axes'
attribute).
"""
s = list()
for i in range(len(self.axes)):
s.append(int(round(self._size[i] / self.axes[i].scale)))
return np.array(s)
def set_size_in_indices(self, value):
"""Sets the size property converted to the index space (via 'axes'
attribute).
"""
s = list()
for i in range(len(self.axes)):
s.append(int(round(value[i] * self.axes[i].scale)))
self.size = s # Use property to get full processing
def get_centre(self):
"""Gets the center indices. The default implementation is simply the
position + half the size in axes space, which should work for any
symmetric widget, but more advanced widgets will need to decide whether
to return the center of gravity or the geometrical center of the
bounds.
"""
        return self._pos + self._size / 2.0
def get_centre_index(self):
"""Gets the center position (in index space). The default
implementation is simply the indices + half the size, which should
work for any symmetric widget, but more advanced widgets will need to
decide whether to return the center of gravity or the geometrical
center of the bounds.
"""
return self.indices + self.get_size_in_indices() / 2.0
def _update_patch_size(self):
"""Updates the size of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def _update_patch_geometry(self):
"""Updates all geometry of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def on_key_press(self, event):
if event.key == "+":
self.increase_size()
if event.key == "-":
self.decrease_size()
def connect(self, ax):
super(ResizableDraggableWidgetBase, self).connect(ax)
canvas = ax.figure.canvas
self.cids.append(canvas.mpl_connect('key_press_event',
self.on_key_press))
def onpick(self, event):
if hasattr(super(ResizableDraggableWidgetBase, self), 'onpick'):
super(ResizableDraggableWidgetBase, self).onpick(event)
if self.picked:
self._drag_store = (self.position, self.size)
def _apply_changes(self, old_size, old_position):
"""Evaluates whether the widget has been moved/resized, and triggers
the correct events and updates the patch geometry. This function has
the advantage that the geometry is updated only once, preventing
flickering, and the 'changed' event only fires once.
"""
moved = self.position != old_position
resized = self.size != old_size
if moved:
if self._navigating:
e = self.axes_manager.events.indices_changed
with e.suppress_callback(self._on_navigate):
for i in range(len(self.axes)):
self.axes[i].index = self.indices[i]
if moved or resized:
# Update patch first
if moved and resized:
self._update_patch_geometry()
elif moved:
self._update_patch_position()
else:
self._update_patch_size()
# Then fire events
if not self.no_events_while_dragging or not self.picked:
if moved:
self.events.moved.trigger(self)
if resized:
self.events.resized.trigger(self)
self.events.changed.trigger(self)
def button_release(self, event):
"""whenever a mouse button is released."""
picked = self.picked
super(ResizableDraggableWidgetBase, self).button_release(event)
if event.button != 1:
return
if picked and self.picked is False:
if self.no_events_while_dragging and self._drag_store:
self._apply_changes(*self._drag_store)
class Widget2DBase(ResizableDraggableWidgetBase):
"""A base class for 2D widgets. It sets the right dimensions for size and
    position, adds the 'border_thickness' attribute and initializes the 'axes'
attribute to the first two navigation axes if possible, if not, the two
first signal_axes are used. Other than that it mainly supplies common
utility functions for inheritors, and implements required functions for
ResizableDraggableWidgetBase.
The implementation for ResizableDraggableWidgetBase methods all assume that
a Rectangle patch will be used, centered on position. If not, the
inheriting class will have to override those as applicable.
"""
def __init__(self, axes_manager, **kwargs):
super(Widget2DBase, self).__init__(axes_manager, **kwargs)
self.border_thickness = 2
# Set default axes
if self.axes_manager is not None:
if self.axes_manager.navigation_dimension > 1:
self.axes = self.axes_manager.navigation_axes[0:2]
elif self.axes_manager.signal_dimension > 1:
self.axes = self.axes_manager.signal_axes[0:2]
elif len(self.axes_manager.shape) > 1:
self.axes = (self.axes_manager.signal_axes +
self.axes_manager.navigation_axes)
else:
raise ValueError("2D widget needs at least two axes!")
else:
self._pos = np.array([0, 0])
self._size = np.array([1, 1])
def _get_patch_xy(self):
"""Returns the xy position of the widget. In this default
implementation, the widget is centered on the position.
"""
return self._pos - self._size / 2.
def _get_patch_bounds(self):
"""Returns the bounds of the patch in the form of a tuple in the order
left, top, width, height. In matplotlib, 'bottom' is used instead of
'top' as the naming assumes an upwards pointing y-axis, meaning the
lowest value corresponds to bottom. However, our widgets will normally
only go on images (which has an inverted y-axis in MPL by default), so
we define the lowest value to be termed 'top'.
"""
xy = self._get_patch_xy()
xs, ys = self.size
return (xy[0], xy[1], xs, ys) # x,y,w,h
def _update_patch_position(self):
if self.is_on and self.patch:
self.patch[0].set_xy(self._get_patch_xy())
self.draw_patch()
def _update_patch_size(self):
self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on and self.patch:
self.patch[0].set_bounds(*self._get_patch_bounds())
self.draw_patch()
class ResizersMixin(object):
"""
Widget mix-in for adding resizing manipulation handles.
The default handles are green boxes displayed on the outside corners of the
boundaries. By default, the handles are only displayed when the widget is
selected (`picked` in matplotlib terminology).
Attributes
----------
resizers : bool
Property that determines whether the resizer handles should be used
resize_color : matplotlib color
The color of the resize handles.
resize_pixel_size : tuple or None
Size of the resize handles in screen pixels. If None, it is set
equal to the size of one 'data-pixel' (image pixel size).
resizer_picked : False or int
        Indicates which, if any, resizer was selected the last time the
widget was picked. `False` if another patch was picked, or the
index of the resizer handle that was picked.
"""
def __init__(self, resizers=True, **kwargs):
super(ResizersMixin, self).__init__(**kwargs)
self.resizer_picked = False
self.pick_offset = (0, 0)
self.resize_color = 'lime'
pick_tol = preferences.Plot.pick_tolerance
        self.resize_pixel_size = (pick_tol, pick_tol)  # Set to None to use the size of one data pixel
self._resizers = resizers
self._resizer_handles = []
self._resizers_on = False
# The `_resizers_on` attribute reflects whether handles are actually on
# as compared to `_resizers` which is whether the user wants them on.
# The difference is e.g. for turning on and off handles when the
# widget is selected/deselected.
@property
def resizers(self):
return self._resizers
@resizers.setter
def resizers(self, value):
if self._resizers != value:
self._resizers = value
self._set_resizers(value, self.ax)
def _update_resizers(self):
"""Update resizer handles' patch geometry.
"""
pos = self._get_resizer_pos()
rsize = self._get_resizer_size()
for i, r in enumerate(self._resizer_handles):
r.set_xy(pos[i])
r.set_width(rsize[0])
r.set_height(rsize[1])
def _set_resizers(self, value, ax):
"""Turns the resizers on/off, in much the same way that _set_patch
works.
"""
if ax is not None:
if value:
for r in self._resizer_handles:
ax.add_artist(r)
r.set_animated(self.blit)
else:
for container in [
ax.patches,
ax.lines,
ax.artists,
ax.texts]:
for r in self._resizer_handles:
if r in container:
container.remove(r)
self._resizers_on = value
def _get_resizer_size(self):
"""Gets the size of the resizer handles in axes coordinates. If
'resize_pixel_size' is None, a size of one pixel will be used.
"""
invtrans = self.ax.transData.inverted()
if self.resize_pixel_size is None:
rsize = [ax.scale for ax in self.axes]
else:
rsize = np.abs(invtrans.transform(self.resize_pixel_size) -
invtrans.transform((0, 0)))
return rsize
def _get_resizer_offset(self):
"""Utility for getting the distance from the boundary box to the
center of the resize handles.
"""
invtrans = self.ax.transData.inverted()
border = self.border_thickness
# Transform the border thickness into data values
dl = np.abs(invtrans.transform((border, border)) -
invtrans.transform((0, 0))) / 2
rsize = self._get_resizer_size()
return rsize / 2 + dl
def _get_resizer_pos(self):
"""Get the positions of the resizer handles.
"""
invtrans = self.ax.transData.inverted()
border = self.border_thickness
# Transform the border thickness into data values
dl = np.abs(invtrans.transform((border, border)) -
invtrans.transform((0, 0))) / 2
rsize = self._get_resizer_size()
xs, ys = self._size
positions = []
rp = np.array(self._get_patch_xy())
p = rp - rsize + dl # Top left
positions.append(p)
p = rp + (xs - dl[0], -rsize[1] + dl[1]) # Top right
positions.append(p)
p = rp + (-rsize[0] + dl[0], ys - dl[1]) # Bottom left
positions.append(p)
p = rp + (xs - dl[0], ys - dl[1]) # Bottom right
positions.append(p)
return positions
def _set_patch(self):
"""Creates the resizer handles, regardless of whether they will be
used or not.
"""
if hasattr(super(ResizersMixin, self), '_set_patch'):
super(ResizersMixin, self)._set_patch()
if self._resizer_handles:
self._set_resizers(False, self.ax)
self._resizer_handles = []
rsize = self._get_resizer_size()
pos = self._get_resizer_pos()
for i in range(len(pos)):
r = plt.Rectangle(pos[i], rsize[0], rsize[1], fill=True, lw=0,
fc=self.resize_color, picker=True,)
self._resizer_handles.append(r)
    def set_on(self, value):
        """Turns resizers on/off when the widget is turned on/off.
"""
if self.resizers and value != self._resizers_on:
self._set_resizers(value, self.ax)
if hasattr(super(ResizersMixin, self), 'set_on'):
super(ResizersMixin, self).set_on(value)
def onpick(self, event):
"""Picking of main patch is same as for widget base, but this also
handles picking of the resize handles. If a resize handle is picked,
`picked` is set to `True`, and `resizer_picked` is set to an integer
indicating which handle was picked (0-3 for top left, top right, bottom
left, bottom right). It is set to `False` if another widget was picked.
If the main patch is picked, the offset from the picked pixel to the
`position` is stored in `pick_offset`. This can be used in e.g.
`_onmousemove` to ease dragging code (prevent widget center/corner
snapping to mouse).
"""
if event.artist in self._resizer_handles:
corner = self._resizer_handles.index(event.artist)
self.resizer_picked = corner
self.picked = True
elif self.picked:
if self.resizers and not self._resizers_on:
self._set_resizers(True, self.ax)
self.ax.figure.canvas.draw_idle()
x = event.mouseevent.xdata
y = event.mouseevent.ydata
self.pick_offset = (x - self._pos[0], y - self._pos[1])
self.resizer_picked = False
else:
self._set_resizers(False, self.ax)
if hasattr(super(ResizersMixin, self), 'onpick'):
super(ResizersMixin, self).onpick(event)
def _add_patch_to(self, ax):
"""Same as widget base, but also adds resizers if 'resizers' property
is True.
"""
if self.resizers:
self._set_resizers(True, ax)
if hasattr(super(ResizersMixin, self), '_add_patch_to'):
super(ResizersMixin, self)._add_patch_to(ax)
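# A minimal sketch (illustrative only) of how the mixin is intended to be
# combined with a 2D widget base through cooperative multiple inheritance;
# ``MyResizableRectangle`` is a hypothetical name, not a class defined in this
# module:
#
#     class MyResizableRectangle(ResizersMixin, Widget2DBase):
#         """Rectangle widget with green corner resize handles."""
#         pass
#
#     # widget = MyResizableRectangle(signal.axes_manager, resizers=True)
#     # Attaching it to a matplotlib axes then adds both the patch and the
#     # resize handles through the super() chains implemented above.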
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.14/_downloads/plot_shift_evoked.py | 26 | 1200 | """
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import tight_layout
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
ch_names = evoked.info['ch_names']
picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'))
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'))
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'))
tight_layout()
| bsd-3-clause |
CforED/Machine-Learning | sklearn/preprocessing/data.py | 9 | 67092 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead."
                " See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
            # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
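# A minimal usage sketch (illustrative only) of the default column-wise
# behaviour of scale(): every feature is shifted to zero mean and divided by
# its population standard deviation.
#
#     import numpy as np
#     from sklearn.preprocessing import scale
#     X = np.array([[1., 10.], [2., 20.], [3., 30.]])
#     Xs = scale(X)
#     # np.mean(Xs, axis=0) is ~[0., 0.] and np.std(Xs, axis=0) is ~[1., 1.];
#     # e.g. column 0 becomes [-1.2247, 0., 1.2247] (std of [1, 2, 3] ~ 0.8165)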
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
            raise TypeError("MinMaxScaler does not support sparse input. "
                            "You may consider using MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
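# A minimal usage sketch (illustrative only) of the [0, 1] mapping documented
# above; inverse_transform undoes it exactly.
#
#     import numpy as np
#     from sklearn.preprocessing import MinMaxScaler
#     X = np.array([[1., 2.], [3., 4.], [5., 6.]])
#     scaler = MinMaxScaler()
#     Xs = scaler.fit_transform(X)
#     # column 0 has min 1 and max 5, so it becomes [0., 0.5, 1.]
#     # (e.g. (3 - 1) / (5 - 1) = 0.5); scaler.inverse_transform(Xs) recovers X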
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Even in the case of `with_mean=False`, we update the mean anyway.
        # This is needed for the incremental computation of the variance.
        # See incr_mean_variance_axis and _incremental_mean_and_var.
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
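# A minimal usage sketch (illustrative only) of out-of-core fitting with
# partial_fit: the running mean_/var_/n_samples_seen_ are updated chunk by
# chunk and the accumulated scale_ is then used by transform().
#
#     import numpy as np
#     from sklearn.preprocessing import StandardScaler
#     scaler = StandardScaler()
#     stream = np.array_split(np.random.randn(1000, 3) * 5. + 2., 10)
#     for chunk in stream:
#         scaler.partial_fit(chunk)        # incremental mean/variance update
#     Xt = scaler.transform(np.random.randn(4, 3))
#     # scaler.n_samples_seen_ == 1000 after the loop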
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
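# A minimal usage sketch (illustrative only): dividing by the per-column
# maximum absolute value keeps zeros at zero, so sparse input stays sparse.
#
#     import scipy.sparse as sp
#     from sklearn.preprocessing import MaxAbsScaler
#     X = sp.csr_matrix([[1., -2.], [0., 4.], [0., 0.]])
#     Xs = MaxAbsScaler().fit_transform(X)     # still a CSR matrix
#     # the column-wise max absolute values are [1., 4.], so the dense view
#     # of Xs is [[1., -0.5], [0., 1.], [0., 0.]]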
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
        X = check_array(X, accept_sparse=('csr', 'csc'), copy=copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
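# A minimal usage sketch (illustrative only) of why the median/IQR statistics
# are preferred in the presence of outliers:
#
#     import numpy as np
#     from sklearn.preprocessing import RobustScaler
#     X = np.array([[1.], [2.], [3.], [4.], [1000.]])   # one gross outlier
#     scaler = RobustScaler().fit(X)
#     # scaler.center_ is the median ([3.]) and scaler.scale_ the IQR ([2.]),
#     # both essentially untouched by the 1000., whereas the mean and standard
#     # deviation of the same column would be dominated by it.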
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
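# A minimal usage sketch (illustrative only): with the default norm='l2' and
# axis=1, each row is divided by its Euclidean length, so dot products of the
# normalized rows are cosine similarities.
#
#     import numpy as np
#     from sklearn.preprocessing import normalize
#     X = np.array([[3., 4.], [1., 0.]])
#     Xn = normalize(X, norm='l2')
#     # row 0 has length 5 and becomes [0.6, 0.8]; row 1 stays [1., 0.]
#     # np.dot(Xn[0], Xn[1]) == 0.6, the cosine similarity of the two rows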
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
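# A minimal usage sketch (illustrative only): values strictly greater than the
# threshold become 1, everything else (including the threshold itself) 0.
#
#     import numpy as np
#     from sklearn.preprocessing import binarize
#     X = np.array([[0.2, 0.8], [0.5, 0.5]])
#     binarize(X, threshold=0.5)
#     # -> [[0., 1.], [0., 0.]]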
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
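# A minimal usage sketch (illustrative only): fitting and transforming the same
# training kernel K applies the usual centering identity
# K_c = K - 1_n K - K 1_n + 1_n K 1_n (with 1_n the n x n matrix of 1/n
# entries), which for a linear kernel matches centering X in input space.
#
#     import numpy as np
#     from sklearn.preprocessing import KernelCenterer
#     X = np.random.randn(5, 3)
#     K = X.dot(X.T)                         # linear kernel
#     Kc = KernelCenterer().fit_transform(K)
#     Xc = X - X.mean(axis=0)
#     # np.allclose(Kc, Xc.dot(Xc.T)) -> True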
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
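# A minimal usage sketch (illustrative only; this helper is private, so the
# import path below may change): only the selected columns are passed through
# `transform`, and the remaining columns are stacked unchanged to the right.
#
#     import numpy as np
#     from sklearn.preprocessing.data import _transform_selected
#     X = np.array([[0., 10.], [1., 20.]])
#     _transform_selected(X, lambda A: A * 100., selected=[0])
#     # -> [[0., 10.], [100., 20.]]   (column 0 scaled, column 1 untouched)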
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
        X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. those smaller than n_values_, selected using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
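# Editor's note: a minimal usage sketch for the encoder above (not part of
# scikit-learn; the data below is made up).  It shows the effect of
# ``categorical_features`` and ``handle_unknown='ignore'``:
#
#   import numpy as np
#   from sklearn.preprocessing import OneHotEncoder
#
#   X = np.array([[0, 1, 2],
#                 [1, 0, 0]])
#   # Encode only the first two columns; the third is stacked, untouched,
#   # to the right of the encoded block.
#   enc = OneHotEncoder(categorical_features=[0, 1], sparse=False)
#   print(enc.fit_transform(X))
#
#   # With handle_unknown='ignore', categories unseen during fit are dropped
#   # at transform time instead of raising an error.
#   enc = OneHotEncoder(handle_unknown='ignore')
#   enc.fit([[0, 0], [1, 1]])
#   print(enc.transform([[0, 2]]).toarray())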
| bsd-3-clause |
nborggren/zipline | zipline/utils/data.py | 1 | 15731 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import datetime
from collections import MutableMapping
from copy import deepcopy
try:
from six.moves._thread import get_ident
except ImportError:
from six.moves._dummy_thread import get_ident
import numpy as np
import pandas as pd
from toolz import merge
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
        Prepend the dates in `missing_dts` to the front of the window,
        growing the internal buffer and window length to make room for them.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
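# Editor's note: a minimal usage sketch for RollingPanel (not part of zipline;
# the items, sids and dates below are made up).  It shows the basic
# add_frame / get_current cycle over a two-frame rolling window:
#
#   import numpy as np
#   import pandas as pd
#
#   rp = RollingPanel(window=2, items=['price', 'volume'], sids=[1, 2])
#   for day in pd.date_range('2016-01-01', periods=4, tz='utc'):
#       frame = pd.DataFrame(np.random.randn(2, 2),
#                            index=['price', 'volume'], columns=[1, 2])
#       rp.add_frame(day, frame)
#   panel = rp.get_current()  # pd.Panel holding the last `window` frames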
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer
class SortedDict(MutableMapping):
"""A mapping of key-value pairs sorted by key according to the sort_key
function provided to the mapping. Ties from the sort_key are broken by
comparing the original keys. `iter` traverses the keys in sort order.
Parameters
----------
key : callable
Called on keys in the mapping to produce the values by which those keys
are sorted.
mapping : mapping, optional
**kwargs
The initial mapping.
>>> d = SortedDict(abs)
>>> d[-1] = 'negative one'
>>> d[0] = 'zero'
>>> d[2] = 'two'
>>> d # doctest: +NORMALIZE_WHITESPACE
SortedDict(<built-in function abs>,
[(0, 'zero'), (-1, 'negative one'), (2, 'two')])
>>> d[1] = 'one' # Mutating the mapping maintains the sort order.
>>> d # doctest: +NORMALIZE_WHITESPACE
SortedDict(<built-in function abs>,
[(0, 'zero'), (-1, 'negative one'), (1, 'one'), (2, 'two')])
>>> del d[0]
>>> d # doctest: +NORMALIZE_WHITESPACE
SortedDict(<built-in function abs>,
[(-1, 'negative one'), (1, 'one'), (2, 'two')])
>>> del d[2]
>>> d
SortedDict(<built-in function abs>, [(-1, 'negative one'), (1, 'one')])
"""
def __init__(self, key, mapping=None, **kwargs):
self._map = {}
self._sorted_key_names = []
self._sort_key = key
self.update(merge(mapping or {}, kwargs))
def __getitem__(self, name):
return self._map[name]
def __setitem__(self, name, value, _bisect_right=bisect.bisect_right):
self._map[name] = value
if len(self._map) > len(self._sorted_key_names):
key = self._sort_key(name)
pair = (key, name)
idx = _bisect_right(self._sorted_key_names, pair)
self._sorted_key_names.insert(idx, pair)
def __delitem__(self, name, _bisect_left=bisect.bisect_left):
del self._map[name]
idx = _bisect_left(self._sorted_key_names,
(self._sort_key(name), name))
del self._sorted_key_names[idx]
def __iter__(self):
for key, name in self._sorted_key_names:
yield name
def __len__(self):
return len(self._map)
def __repr__(self, _repr_running={}):
# Based on OrderedDict/defaultdict
call_key = id(self), get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s(%r)' % (self.__class__.__name__, self._sort_key)
return '%s(%r, %r)' % (self.__class__.__name__, self._sort_key,
list(self.items()))
finally:
del _repr_running[call_key]
| apache-2.0 |
JonasWallin/Mixture | example/NIGmultid.py | 1 | 1248 | '''
Testing whether the model can recover the parameters of multiple univariate
regular NIG densities
Created on May 1, 2016
@author: jonaswallin
'''
from Mixture.density import NIG, mNIG
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import scipy.optimize  # needed so that sp.optimize.minimize below resolves
n = 1000
if __name__ == "__main__":
simObj = NIG(paramvec = [1.1, 2.12, 1., 2.])
Y_1 = simObj.simulate(n = n)
simObj2 = NIG(paramvec = [0, 2.12, .1, .1])
Y_2 = simObj2.simulate(n = n)
f, axarr = plt.subplots(2, 1)
axarr[0].hist(Y_1, 200,normed=True, histtype='stepfilled', alpha=0.2)
axarr[1].hist(Y_2, 200,normed=True, histtype='stepfilled', alpha=0.2)
#
multiObj = mNIG(d = 2)
multiObj.set_data(np.vstack((Y_1, Y_2)).transpose())
def f(x):
lik = - np.sum(multiObj(paramvec= x))
if np.isnan(lik):
return np.Inf
return lik
optim = sp.optimize.minimize(f, np.zeros(8),method='CG')
multiObj.set_param_vec(optim.x)
x_1 =np.linspace(np.min(Y_1),np.max(Y_1), num = 1000)
x_2 =np.linspace(np.min(Y_2),np.max(Y_2), num = 1000)
x = np.vstack((x_1, x_2)).transpose()
dens = multiObj.dens_dim(y = x, log_ = False)
axarr[0].plot(x_1, dens[:,0])
axarr[1].plot(x_2, dens[:,1])
plt.show() | gpl-3.0 |
USBhost/Simple-Stream | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
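# Editor's note: a quick illustration (not in the original script) of how the
# two helpers above compose:
#
#   biggest_indices([5, 1, 9, 3], 2)          # -> [0, 2]  (indices of 5 and 9)
#   by_indices(['a', 'b', 'c', 'd'], [0, 2])  # -> ['a', 'c']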
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
        return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
trankmichael/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/matplotlib/animation.py | 4 | 41616 | # TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
import sys
import itertools
import contextlib
from matplotlib.cbook import iterable, is_string_like
from matplotlib.compat import subprocess
from matplotlib import verbose
from matplotlib import rcParams
# Other potential writing methods:
# * http://pymedia.org/
# * libmng (produces swf) python wrappers: https://github.com/libming/libming
# * Wrap x264 API:
# (http://stackoverflow.com/questions/2940671/
# how-to-encode-series-of-images-into-h264-using-x264-api-c-c )
# A registry for available MovieWriter classes
class MovieWriterRegistry(object):
def __init__(self):
self.avail = dict()
# Returns a decorator that can be used on classes to register them under
# a name. As in:
# @register('foo')
# class Foo:
# pass
def register(self, name):
def wrapper(writerClass):
if writerClass.isAvailable():
self.avail[name] = writerClass
return writerClass
return wrapper
def list(self):
''' Get a list of available MovieWriters.'''
return self.avail.keys()
def is_available(self, name):
return name in self.avail
def __getitem__(self, name):
if not self.avail:
raise RuntimeError("No MovieWriters available!")
return self.avail[name]
writers = MovieWriterRegistry()
class MovieWriter(object):
'''
    Base class for writing movies. Fundamentally, what a MovieWriter does
    is provide a way to grab frames by calling grab_frame(). setup()
    is called to start the process and finish() is called afterwards.
    This class is set up to provide for writing movie frame data to a pipe.
    saving() is provided as a context manager to facilitate this process as::
        with moviewriter.saving(fig, 'myfile.mp4', 100):
# Iterate over frames
moviewriter.grab_frame()
The use of the context manager ensures that setup and cleanup are
performed as necessary.
frame_format: string
The format used in writing frame data, defaults to 'rgba'
'''
def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None,
metadata=None):
'''
Construct a new MovieWriter object.
fps: int
Framerate for movie.
codec: string or None, optional
The codec to use. If None (the default) the setting in the
rcParam `animation.codec` is used.
bitrate: int or None, optional
The bitrate for the saved movie file, which is one way to control
the output file size and quality. The default value is None,
which uses the value stored in the rcParam `animation.bitrate`.
A value of -1 implies that the bitrate should be determined
automatically by the underlying utility.
extra_args: list of strings or None
A list of extra string arguments to be passed to the underlying
            movie utility. The default is None, which passes the additional
            arguments in the 'animation.extra_args' rcParam.
metadata: dict of string:string or None
A dictionary of keys and values for metadata to include in the
output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
'''
self.fps = fps
self.frame_format = 'rgba'
if codec is None:
self.codec = rcParams['animation.codec']
else:
self.codec = codec
if bitrate is None:
self.bitrate = rcParams['animation.bitrate']
else:
self.bitrate = bitrate
if extra_args is None:
self.extra_args = list(rcParams[self.args_key])
else:
self.extra_args = extra_args
if metadata is None:
self.metadata = dict()
else:
self.metadata = metadata
@property
def frame_size(self):
'A tuple (width,height) in pixels of a movie frame.'
width_inches, height_inches = self.fig.get_size_inches()
return width_inches * self.dpi, height_inches * self.dpi
def setup(self, fig, outfile, dpi, *args):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
'''
self.outfile = outfile
self.fig = fig
self.dpi = dpi
# Run here so that grab_frame() can write the data to a pipe. This
# eliminates the need for temp files.
self._run()
@contextlib.contextmanager
def saving(self, *args):
'''
Context manager to facilitate writing the movie file.
``*args`` are any parameters that should be passed to `setup`.
'''
# This particular sequence is what contextlib.contextmanager wants
self.setup(*args)
yield
self.finish()
def _run(self):
# Uses subprocess to call the program for assembling frames into a
# movie file. *args* returns the sequence of command line arguments
# from a few configuration options.
command = self._args()
if verbose.ge('debug'):
output = sys.stdout
else:
output = subprocess.PIPE
verbose.report('MovieWriter.run: running command: %s' %
' '.join(command))
self._proc = subprocess.Popen(command, shell=False,
stdout=output, stderr=output,
stdin=subprocess.PIPE)
def finish(self):
'Finish any processing for writing the movie.'
self.cleanup()
def grab_frame(self, **savefig_kwargs):
'''
Grab the image information from the figure and save as a movie frame.
All keyword arguments in savefig_kwargs are passed on to the 'savefig'
command that saves the figure.
'''
verbose.report('MovieWriter.grab_frame: Grabbing frame.',
level='debug')
try:
# Tell the figure to save its data to the sink, using the
# frame format and dpi.
self.fig.savefig(self._frame_sink(), format=self.frame_format,
dpi=self.dpi, **savefig_kwargs)
except RuntimeError:
out, err = self._proc.communicate()
verbose.report('MovieWriter -- Error '
'running proc:\n%s\n%s' % (out,
err), level='helpful')
raise
def _frame_sink(self):
'Returns the place to which frames should be written.'
return self._proc.stdin
def _args(self):
'Assemble list of utility-specific command-line arguments.'
        raise NotImplementedError("args needs to be implemented by subclass.")
def cleanup(self):
'Clean-up and collect the process used to write the movie file.'
out, err = self._proc.communicate()
verbose.report('MovieWriter -- '
'Command stdout:\n%s' % out, level='debug')
verbose.report('MovieWriter -- '
'Command stderr:\n%s' % err, level='debug')
@classmethod
def bin_path(cls):
'''
Returns the binary path to the commandline tool used by a specific
subclass. This is a class method so that the tool can be looked for
before making a particular MovieWriter subclass available.
'''
return rcParams[cls.exec_key]
@classmethod
def isAvailable(cls):
'''
Check to see if a MovieWriter subclass is actually available by
running the commandline tool.
'''
try:
subprocess.Popen(cls.bin_path(),
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
except OSError:
return False
class FileMovieWriter(MovieWriter):
'`MovieWriter` subclass that handles writing to a file.'
def __init__(self, *args, **kwargs):
MovieWriter.__init__(self, *args, **kwargs)
self.frame_format = rcParams['animation.frame_format']
def setup(self, fig, outfile, dpi, frame_prefix='_tmp', clear_temp=True):
'''
Perform setup for writing the movie file.
fig: `matplotlib.Figure` instance
The figure object that contains the information for frames
outfile: string
The filename of the resulting movie file
dpi: int
The DPI (or resolution) for the file. This controls the size
in pixels of the resulting movie file.
frame_prefix: string, optional
The filename prefix to use for the temporary files. Defaults
to '_tmp'
clear_temp: bool
Specifies whether the temporary files should be deleted after
the movie is written. (Useful for debugging.) Defaults to True.
'''
self.fig = fig
self.outfile = outfile
self.dpi = dpi
self.clear_temp = clear_temp
self.temp_prefix = frame_prefix
self._frame_counter = 0 # used for generating sequential file names
self._temp_names = list()
self.fname_format_str = '%s%%07d.%s'
@property
def frame_format(self):
'''
Format (png, jpeg, etc.) to use for saving the frames, which can be
decided by the individual subclasses.
'''
return self._frame_format
@frame_format.setter
def frame_format(self, frame_format):
if frame_format in self.supported_formats:
self._frame_format = frame_format
else:
self._frame_format = self.supported_formats[0]
def _base_temp_name(self):
# Generates a template name (without number) given the frame format
# for extension and the prefix.
return self.fname_format_str % (self.temp_prefix, self.frame_format)
def _frame_sink(self):
# Creates a filename for saving using the basename and the current
# counter.
fname = self._base_temp_name() % self._frame_counter
# Save the filename so we can delete it later if necessary
self._temp_names.append(fname)
verbose.report(
'FileMovieWriter.frame_sink: saving frame %d to fname=%s' %
(self._frame_counter, fname),
level='debug')
self._frame_counter += 1 # Ensures each created name is 'unique'
# This file returned here will be closed once it's used by savefig()
# because it will no longer be referenced and will be gc-ed.
return open(fname, 'wb')
def finish(self):
# Call run here now that all frame grabbing is done. All temp files
# are available to be assembled.
self._run()
MovieWriter.finish(self) # Will call clean-up
# Check error code for creating file here, since we just run
# the process here, rather than having an open pipe.
if self._proc.returncode:
raise RuntimeError('Error creating movie, return code: '
+ str(self._proc.returncode)
+ ' Try running with --verbose-debug')
def cleanup(self):
MovieWriter.cleanup(self)
#Delete temporary files
if self.clear_temp:
import os
verbose.report(
'MovieWriter: clearing temporary fnames=%s' %
str(self._temp_names),
level='debug')
for fname in self._temp_names:
os.remove(fname)
# Base class of ffmpeg information. Has the config keys and the common set
# of arguments that controls the *output* side of things.
class FFMpegBase:
exec_key = 'animation.ffmpeg_path'
args_key = 'animation.ffmpeg_args'
@property
def output_args(self):
# The %dk adds 'k' as a suffix so that ffmpeg treats our bitrate as in
# kbps
args = ['-vcodec', self.codec]
if self.bitrate > 0:
args.extend(['-b', '%dk' % self.bitrate])
if self.extra_args:
args.extend(self.extra_args)
for k, v in self.metadata.items():
args.extend(['-metadata', '%s=%s' % (k, v)])
return args + ['-y', self.outfile]
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(MovieWriter, FFMpegBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a pipe.
args = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
'-r', str(self.fps)]
# Logging is quieted because subprocess.PIPE has limited buffer size.
if not verbose.ge('debug'):
args += ['-loglevel', 'quiet']
args += ['-i', 'pipe:'] + self.output_args
return args
#Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FileMovieWriter, FFMpegBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie using a collection of temp images
return [self.bin_path(), '-vframes', str(self._frame_counter),
'-r', str(self.fps), '-i',
self._base_temp_name()] + self.output_args
# Base class of avconv information. AVConv has identical arguments to
# FFMpeg
class AVConvBase(FFMpegBase):
exec_key = 'animation.avconv_path'
args_key = 'animation.avconv_args'
# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
pass
# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
pass
# Base class of mencoder information. Contains configuration key information
# as well as arguments for controlling *output*
class MencoderBase:
exec_key = 'animation.mencoder_path'
args_key = 'animation.mencoder_args'
# Mencoder only allows certain keys, other ones cause the program
# to fail.
allowed_metadata = ['name', 'artist', 'genre', 'subject', 'copyright',
'srcform', 'comment']
# Mencoder mandates using name, but 'title' works better with ffmpeg.
# If we find it, just put it's value into name
def _remap_metadata(self):
if 'title' in self.metadata:
self.metadata['name'] = self.metadata['title']
@property
def output_args(self):
self._remap_metadata()
args = ['-o', self.outfile, '-ovc', 'lavc', '-lavcopts',
'vcodec=%s' % self.codec]
if self.bitrate > 0:
args.append('vbitrate=%d' % self.bitrate)
if self.extra_args:
args.extend(self.extra_args)
if self.metadata:
args.extend(['-info', ':'.join('%s=%s' % (k, v)
for k, v in self.metadata.items()
if k in self.allowed_metadata)])
return args
# Combine Mencoder options with pipe-based writing
@writers.register('mencoder')
class MencoderWriter(MovieWriter, MencoderBase):
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(), '-', '-demuxer', 'rawvideo', '-rawvideo',
('w=%i:h=%i:' % self.frame_size +
'fps=%i:format=%s' % (self.fps,
self.frame_format))] + self.output_args
# Combine Mencoder options with temp file-based writing
@writers.register('mencoder_file')
class MencoderFileWriter(FileMovieWriter, MencoderBase):
supported_formats = ['png', 'jpeg', 'tga', 'sgi']
def _args(self):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return [self.bin_path(),
'mf://%s*.%s' % (self.temp_prefix, self.frame_format),
'-frames', str(self._frame_counter), '-mf',
'type=%s:fps=%d' % (self.frame_format,
self.fps)] + self.output_args
# Base class for animated GIFs with convert utility
class ImageMagickBase:
exec_key = 'animation.convert_path'
args_key = 'animation.convert_args'
@property
def delay(self):
return 100. / self.fps
@property
def output_args(self):
return [self.outfile]
@writers.register('imagemagick')
class ImageMagickWriter(MovieWriter, ImageMagickBase):
def _args(self):
return ([self.bin_path(),
'-size', '%ix%i' % self.frame_size, '-depth', '8',
'-delay', str(self.delay), '-loop', '0',
'%s:-' % self.frame_format]
+ self.output_args)
@writers.register('imagemagick_file')
class ImageMagickFileWriter(FileMovieWriter, ImageMagickBase):
supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
'pbm', 'raw', 'rgba']
def _args(self):
return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
'%s*.%s' % (self.temp_prefix, self.frame_format)]
+ self.output_args)
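# Editor's note: a minimal sketch (not part of matplotlib) of how the writer
# classes above are typically combined with Animation.save(); the figure and
# animation objects (`fig`, `anim`) are assumed to exist already.
#
#   import matplotlib.animation as animation
#
#   Writer = animation.writers['ffmpeg']  # registry lookup defined above
#   writer = Writer(fps=15, bitrate=1800, metadata=dict(artist='example'))
#   anim.save('movie.mp4', writer=writer)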
class Animation(object):
'''
This class wraps the creation of an animation using matplotlib. It is
only a base class which should be subclassed to provide needed behavior.
*fig* is the figure object that is used to get draw, resize, and any
other needed events.
*event_source* is a class that can run a callback when desired events
are generated, as well as be stopped and started. Examples include timers
(see :class:`TimedAnimation`) and file system notifications.
*blit* is a boolean that controls whether blitting is used to optimize
drawing.
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
# Disables blitting for backends that don't support it. This
# allows users to request it if available, but still have a
# fallback that works if it is not.
self._blit = blit and fig.canvas.supports_blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Clear the initial frame
self._init_draw()
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event',
self._stop)
if self._blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# On start, we add our callback for stepping the animation and
# actually start the event_source. We also disconnect _start
# from the draw_events
self.event_source.add_callback(self._step)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
def _stop(self, *args):
# On stop we disconnect all of our events.
if self._blit:
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source.remove_callback(self._step)
self.event_source = None
def save(self, filename, writer=None, fps=None, dpi=None, codec=None,
bitrate=None, extra_args=None, metadata=None, extra_anim=None,
savefig_kwargs=None):
'''
Saves a movie file by drawing every frame.
*filename* is the output filename, e.g., :file:`mymovie.mp4`
*writer* is either an instance of :class:`MovieWriter` or a string
key that identifies a class to use, such as 'ffmpeg' or 'mencoder'.
If nothing is passed, the value of the rcparam `animation.writer` is
used.
*fps* is the frames per second in the movie. Defaults to None,
which will use the animation's specified interval to set the frames
per second.
*dpi* controls the dots per inch for the movie frames. This combined
with the figure's size in inches controls the size of the movie.
*codec* is the video codec to be used. Not all codecs are supported
by a given :class:`MovieWriter`. If none is given, this defaults to the
value specified by the rcparam `animation.codec`.
*bitrate* specifies the amount of bits used per second in the
compressed movie, in kilobits per second. A higher number means a
higher quality movie, but at the cost of increased file size. If no
value is given, this defaults to the value given by the rcparam
`animation.bitrate`.
*extra_args* is a list of extra string arguments to be passed to the
        underlying movie utility. The default is None, which passes the
        additional arguments in the 'animation.extra_args' rcParam.
*metadata* is a dictionary of keys and values for metadata to include
in the output file. Some keys that may be of use include:
title, artist, genre, subject, copyright, srcform, comment.
*extra_anim* is a list of additional `Animation` objects that should
be included in the saved movie file. These need to be from the same
`matplotlib.Figure` instance. Also, animation frames will just be
simply combined, so there should be a 1:1 correspondence between
the frames from the different animations.
*savefig_kwargs* is a dictionary containing keyword arguments to be
passed on to the 'savefig' command which is called repeatedly to save
the individual frames. This can be used to set tight bounding boxes,
for example.
'''
if savefig_kwargs is None:
savefig_kwargs = {}
# FIXME: Using 'bbox_inches' doesn't currently work with
# writers that pipe the data to the command because this
# requires a fixed frame size (see Ryan May's reply in this
# thread: [1]). Thus we drop the 'bbox_inches' argument if it
# exists in savefig_kwargs.
#
# [1] (http://matplotlib.1069221.n5.nabble.com/
# Animation-class-let-save-accept-kwargs-which-
# are-passed-on-to-savefig-td39627.html)
#
if 'bbox_inches' in savefig_kwargs:
if not (writer in ['ffmpeg_file', 'mencoder_file'] or
isinstance(writer,
(FFMpegFileWriter, MencoderFileWriter))):
print("Warning: discarding the 'bbox_inches' argument in "
"'savefig_kwargs' as it is only currently supported "
"with the writers 'ffmpeg_file' and 'mencoder_file' "
"(writer used: "
"'{}').".format(writer if isinstance(writer, str)
else writer.__class__.__name__))
savefig_kwargs.pop('bbox_inches')
# Need to disconnect the first draw callback, since we'll be doing
# draws. Otherwise, we'll end up starting the animation.
if self._first_draw_id is not None:
self._fig.canvas.mpl_disconnect(self._first_draw_id)
reconnect_first_draw = True
else:
reconnect_first_draw = False
if fps is None and hasattr(self, '_interval'):
# Convert interval in ms to frames per second
fps = 1000. / self._interval
# If the writer is None, use the rc param to find the name of the one
# to use
if writer is None:
writer = rcParams['animation.writer']
# Re-use the savefig DPI for ours if none is given
if dpi is None:
dpi = rcParams['savefig.dpi']
if codec is None:
codec = rcParams['animation.codec']
if bitrate is None:
bitrate = rcParams['animation.bitrate']
all_anim = [self]
if not extra_anim is None:
all_anim.extend(anim
for anim
in extra_anim if anim._fig is self._fig)
# If we have the name of a writer, instantiate an instance of the
# registered class.
if is_string_like(writer):
if writer in writers.avail:
writer = writers[writer](fps, codec, bitrate,
extra_args=extra_args,
metadata=metadata)
else:
import warnings
warnings.warn("MovieWriter %s unavailable" % writer)
try:
writer = writers.list()[0]
except IndexError:
raise ValueError("Cannot save animation: no writers are "
"available. Please install mencoder or "
"ffmpeg to save animations.")
verbose.report('Animation.save using %s' % type(writer),
level='helpful')
# Create a new sequence of frames for saved data. This is different
# from new_frame_seq() to give the ability to save 'live' generated
# frame information to be saved later.
# TODO: Right now, after closing the figure, saving a movie won't work
# since GUI widgets are gone. Either need to remove extra code to
        # allow for this non-existent use case or find a way to make it work.
with writer.saving(self._fig, filename, dpi):
for data in itertools.izip(*[a.new_saved_frame_seq()
for a in all_anim]):
for anim, d in zip(all_anim, data):
#TODO: Need to see if turning off blit is really necessary
anim._draw_next_frame(d, blit=False)
writer.grab_frame(**savefig_kwargs)
# Reconnect signal for first draw if necessary
if reconnect_first_draw:
self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
self._start)
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = next(self.frame_seq)
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
def new_frame_seq(self):
'Creates a new sequence of frame information.'
# Default implementation is just an iterator over self._framedata
return iter(self._framedata)
def new_saved_frame_seq(self):
'Creates a new sequence of saved/cached frame information.'
# Default is the same as the regular frame sequence
return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
# Breaks down the drawing of the next frame into steps of pre- and
# post- draw, as well as the drawing of the frame itself.
self._pre_draw(framedata, blit)
self._draw_frame(framedata)
self._post_draw(framedata, blit)
def _init_draw(self):
# Initial draw to clear the frame. Also used by the blitting code
# when a clean base is required.
pass
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists, self._blit_cache)
def _draw_frame(self, framedata):
# Performs actual drawing of the frame.
raise NotImplementedError('Needs to be implemented by subclasses to'
' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists, self._blit_cache)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
a.figure.canvas.restore_region(bg_cache[a])
def _setup_blit(self):
# Setting up the blit requires: a cache of the background for the
# axes
self._blit_cache = dict()
self._drawn_artists = []
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
self._post_draw(None, self._blit)
def _handle_resize(self, *args):
# On resize, we need to disable the resize event handling so we don't
# get too many events. Also stop the animation events, so that
# we're paused. Reset the cache and re-init. Set up an event handler
# to catch once the draw has actually taken place.
self._fig.canvas.mpl_disconnect(self._resize_id)
self.event_source.stop()
self._blit_cache.clear()
self._init_draw()
self._resize_id = self._fig.canvas.mpl_connect('draw_event',
self._end_redraw)
def _end_redraw(self, evt):
# Now that the redraw has happened, do the post draw flushing and
# blit handling. Then re-enable all of the original events.
self._post_draw(None, self._blit)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._resize_id)
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
class TimedAnimation(Animation):
'''
:class:`Animation` subclass that supports time-based animation, drawing
a new frame every *interval* milliseconds.
*repeat* controls whether the animation should repeat when the sequence
of frames is completed.
*repeat_delay* optionally adds a delay in milliseconds before repeating
the animation.
'''
def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
event_source=None, *args, **kwargs):
# Store the timing information
self._interval = interval
self._repeat_delay = repeat_delay
self.repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer()
event_source.interval = self._interval
Animation.__init__(self, fig, event_source=event_source,
*args, **kwargs)
def _step(self, *args):
'''
Handler for getting events.
'''
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to
# repeat, we refresh the frame sequence and return True. If
# _repeat_delay is set, change the event_source's interval to our loop
# delay and set the callback to one which will then set the interval
# back.
still_going = Animation._step(self, *args)
if not still_going and self.repeat:
self.frame_seq = self.new_frame_seq()
if self._repeat_delay:
self.event_source.remove_callback(self._step)
self.event_source.add_callback(self._loop_delay)
self.event_source.interval = self._repeat_delay
return True
else:
return Animation._step(self, *args)
else:
return still_going
def _stop(self, *args):
        # If we stop in the middle of a loop delay (which is relatively
        # likely, given the potential pause here), remove the loop_delay
        # callback as well.
self.event_source.remove_callback(self._loop_delay)
Animation._stop(self)
def _loop_delay(self, *args):
# Reset the interval and change callbacks after the delay.
self.event_source.remove_callback(self._loop_delay)
self.event_source.interval = self._interval
self.event_source.add_callback(self._step)
Animation._step(self)
class ArtistAnimation(TimedAnimation):
'''
Before calling this function, all plotting should have taken place
and the relevant artists saved.
frame_info is a list, with each list entry a collection of artists that
represent what needs to be enabled on each frame. These will be disabled
for other frames.
'''
def __init__(self, fig, artists, *args, **kwargs):
# Internal list of artists drawn in the most recent frame.
self._drawn_artists = []
# Use the list of artists as the framedata, which will be iterated
# over by the machinery.
self._framedata = artists
TimedAnimation.__init__(self, fig, *args, **kwargs)
def _init_draw(self):
# Make all the artists involved in *any* frame invisible
axes = []
for f in self.new_frame_seq():
for artist in f:
artist.set_visible(False)
# Assemble a list of unique axes that need flushing
if artist.axes not in axes:
axes.append(artist.axes)
# Flush the needed axes
for ax in axes:
ax.figure.canvas.draw()
def _pre_draw(self, framedata, blit):
'''
Clears artists from the last frame.
'''
if blit:
# Let blit handle clearing
self._blit_clear(self._drawn_artists, self._blit_cache)
else:
# Otherwise, make all the artists from the previous frame invisible
for artist in self._drawn_artists:
artist.set_visible(False)
def _draw_frame(self, artists):
# Save the artists that were passed in as framedata for the other
# steps (esp. blitting) to use.
self._drawn_artists = artists
# Make all the artists from the current frame visible
for artist in artists:
artist.set_visible(True)
class FuncAnimation(TimedAnimation):
'''
Makes an animation by repeatedly calling a function *func*, passing in
(optional) arguments in *fargs*.
*frames* can be a generator, an iterable, or a number of frames.
*init_func* is a function used to draw a clear frame. If not given, the
results of drawing from the first item in the frames sequence will be
used. This function will be called once before the first frame.
If blit=True, *func* and *init_func* should return an iterable of
drawables to clear.
'''
def __init__(self, fig, func, frames=None, init_func=None, fargs=None,
save_count=None, **kwargs):
if fargs:
self._args = fargs
else:
self._args = ()
self._func = func
# Amount of framedata to keep around for saving movies. This is only
# used if we don't know how many frames there will be: in the case
# of no generator or in the case of a callable.
self.save_count = save_count
# Set up a function that creates a new iterable when needed. If nothing
# is passed in for frames, just use itertools.count, which will just
# keep counting from 0. A callable passed in for frames is assumed to
# be a generator. An iterable will be used as is, and anything else
# will be treated as a number of frames.
if frames is None:
self._iter_gen = itertools.count
elif callable(frames):
self._iter_gen = frames
elif iterable(frames):
self._iter_gen = lambda: iter(frames)
self.save_count = len(frames)
else:
self._iter_gen = lambda: iter(range(frames))
self.save_count = frames
        # If save_count was not given, default to keeping 100 frames around.
if self.save_count is None:
self.save_count = 100
self._init_func = init_func
# Needs to be initialized so the draw functions work without checking
self._save_seq = []
TimedAnimation.__init__(self, fig, **kwargs)
# Need to reset the saved seq, since right now it will contain data
# for a single frame from init, which is not what we want.
self._save_seq = []
def new_frame_seq(self):
# Use the generating function to generate a new frame sequence
return self._iter_gen()
def new_saved_frame_seq(self):
# Generate an iterator for the sequence of saved data. If there are
# no saved frames, generate a new frame sequence and take the first
# save_count entries in it.
if self._save_seq:
return iter(self._save_seq)
else:
return itertools.islice(self.new_frame_seq(), self.save_count)
def _init_draw(self):
# Initialize the drawing either using the given init_func or by
# calling the draw function with the first item of the frame sequence.
# For blitting, the init_func should return a sequence of modified
# artists.
if self._init_func is None:
self._draw_frame(next(self.new_frame_seq()))
else:
self._drawn_artists = self._init_func()
def _draw_frame(self, framedata):
# Save the data for potential saving of movies.
self._save_seq.append(framedata)
# Make sure to respect save_count (keep only the last save_count
# around)
self._save_seq = self._save_seq[-self.save_count:]
# Call the func with framedata and args. If blitting is desired,
# func needs to return a sequence of any artists that were modified.
self._drawn_artists = self._func(framedata, *self._args)
| gpl-3.0 |
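A minimal usage sketch for the FuncAnimation class defined above (the figure, sine data, and callback names are illustrative, not part of the source). The update callable returns the artists it modified so that blitting, as implemented in _blit_draw/_blit_clear, only re-renders those.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
line, = ax.plot(x, np.sin(x))

def init():
    # Draws the clean base frame; with blit=True it must return the artists.
    line.set_ydata(np.sin(x))
    return (line,)

def update(frame):
    # Called once per entry of `frames`; returns the artists it modified.
    line.set_ydata(np.sin(x + 0.1 * frame))
    return (line,)

anim = FuncAnimation(fig, update, frames=100, init_func=init,
                     interval=50, blit=True)
plt.show()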
appapantula/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
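As a small follow-up to the mean-shift example above (a sketch, not part of the original script): the fitted estimator can also label new observations by assigning them to the nearest estimated cluster centre.

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs

X, _ = make_blobs(n_samples=1000, centers=[[1, 1], [-1, -1], [1, -1]],
                  cluster_std=0.6, random_state=0)
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)

# Assign previously unseen points to the nearest of the estimated centres.
print(ms.predict(np.array([[0.9, 1.1], [-1.2, -0.8]])))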
maym2104/ift6266-h17-project | lib/image.py | 1 | 6058 | """
Copyright (c) 2017 - Philip Paquette
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import os
import skimage.color as color
import skimage.io as io
import skimage.transform as transform
import time
import theano
import warnings
from settings import SAVED_DIR, MAX_WIDTH, MAX_HEIGHT, AUTO_RESIZE, DATASET_AUGMENTATION
from .utils import mkdir_p
class Image(object):
"""Image manipulation class
This class loads images from disk and perform manipulation on them
(e.g. masking, rescaling)
"""
def __init__(self, dataset_image=None):
if dataset_image is not None:
self.full_path = dataset_image['full_path']
self.id = dataset_image['id']
self._load_from_disk()
if not DATASET_AUGMENTATION:
process_fn = self.resize if AUTO_RESIZE else self.crop
process_fn()
def downscale(self, factor):
""" Downscale the image with factor (Uses mean pooling) """
self.data = self._to_theano(transform.downscale_local_mean(self._from_theano(self.data), (1, factor, factor)))
def upscale(self, factor, use_smoothing=False):
""" Upscale the non-masked, masked, and mask with factor (with optional Gaussian filter for smoothing) """
scale_fn = transform.pyramid_expand if use_smoothing else transform.rescale
self.data = self._to_theano(scale_fn(self._from_theano(self.data), factor))
def crop(self, offset_h=None, offset_v=None, width=None, height=None):
""" Crop image """
offset_h = (self.width - MAX_WIDTH) // 2 if offset_h is None else offset_h
offset_v = (self.height - MAX_HEIGHT) // 2 if offset_v is None else offset_v
target_width = MAX_WIDTH if width is None else width
target_height = MAX_HEIGHT if height is None else height
self.data = self.data[:, offset_v:target_height + offset_v, offset_h:target_width + offset_h]
def resize(self, target_width=MAX_WIDTH, target_height=MAX_HEIGHT, keep_ratio=False):
"""" Resizes the image to the target dimension """
if not keep_ratio:
self.data = self._to_theano(transform.resize(self._from_theano(self.data), (target_height, target_width, 3)))
self.height, self.width = target_height, target_width
else:
scale = float(target_height) / min(self.height, self.width)
self.height, self.width = int(round(scale * self.height)), int(round(scale * self.width))
self.data = self._to_theano(transform.resize(self._from_theano(self.data), (self.height, self.width, 3)))
offset_h, offset_v = int((self.width - target_width) / 2.), int((self.height - target_height) / 2.)
self.crop(offset_h, offset_v, target_width, target_height)
self.height, self.width = target_height, target_width
def flip(self):
self.data = np.flip(self.data, axis=2)
def rotate(self, angle):
self.data = self._to_theano(transform.rotate(self._from_theano(self.data), -1. * angle, resize=False, mode='edge'))
def to_gray(self):
self.data = self._to_theano(color.gray2rgb(color.rgb2gray(self._from_theano(self.data))))
def save(self, path_prefix = '', file_prefix = '', image_data = None):
""" Save image to disk """
image_data = self.data if image_data is None else image_data
timestamp = int(time.time() * 1000)
target_folder = os.path.join(SAVED_DIR, path_prefix)
if not os.path.exists(target_folder):
mkdir_p(target_folder)
file_name = os.path.join(target_folder, 'image%s-%d.jpg' % (file_prefix, timestamp))
io.imsave(file_name, np.array(255. * self._from_theano(image_data)).astype('uint8'))
def display(self, image_data = None):
""" Display masked image """
plt.figure()
plt.axis('off')
plt.imshow(self._from_theano(image_data if image_data is not None else self.data))
plt.show()
# -----------------------
# --- Private Methods ---
# -----------------------
def _load_from_disk(self, target_dims=None):
""" Load the image file from disk """
if not os.path.exists(self.full_path):
warnings.warn('Unable to load image - Path: %s.' % (self.full_path))
return
data_from_disk = np.array(io.imread(self.full_path), dtype=theano.config.floatX) / 255.
if (len(data_from_disk.shape) == 2):
data_from_disk = color.gray2rgb(data_from_disk)
self.data = self._to_theano(data_from_disk)
self.height = self.data.shape[1]
self.width = self.data.shape[2]
def _to_theano(self, target):
""" Converts numpy array from (height, width, channel) to (channel, height, width) """
return np.transpose(target, (2, 0, 1))
def _from_theano(self, target):
""" Converts numpy array from (channel, height, width) to (height, width, channel) """
return np.transpose(target, (1, 2, 0))
| mit |
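A hypothetical driver for the Image class above, assuming an import path of lib.image that mirrors the repository layout; the file path is made up and only the 'full_path' and 'id' keys are actually read by __init__.

from lib.image import Image  # assumed import path

# Hypothetical record; 'full_path' must point to a real image on disk.
dataset_image = {'full_path': '/tmp/example.jpg', 'id': 42}

img = Image(dataset_image)            # loads from disk, then crops/resizes per settings
img.resize(64, 64, keep_ratio=True)   # rescale so the short side fits, then centre-crop
img.flip()                            # horizontal flip (axis 2 is width in (c, h, w) layout)
img.to_gray()                         # grayscale, kept as three identical channels
img.save(path_prefix='augmented', file_prefix='-flipped')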
schreiberx/sweet | benchmarks_sphere/paper_jrn_sl_exp/test_compare_wt_dt_vs_accuracy_gaussian_bumps_pvd_M512_6hours_l_n_uv/postprocessing_consolidate_prog_phi_pert.py | 8 | 6291 | #! /usr/bin/env python3
import sys
import math
from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
mule_plotting_usetex(False)
groups = ['runtime.timestepping_method']
tagnames_y = [
'sphere_data_diff_prog_phi_pert.res_norm_l1',
'sphere_data_diff_prog_phi_pert.res_norm_l2',
'sphere_data_diff_prog_phi_pert.res_norm_linf',
]
j = JobsData('./job_bench_*', verbosity=0)
c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
print(key)
for tagname_y in tagnames_y:
params = []
params += [
{
'tagname_x': 'runtime.timestep_size',
'xlabel': "Timestep size (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Timestep size vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': True,
},
]
params += [
{
'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
'xlabel': "Wallclock time (seconds)",
'ylabel': pp.latex_pretty_names[tagname_y],
'title': 'Wallclock time vs. error',
'xscale': 'log',
'yscale': 'log',
'convergence': False,
},
]
for param in params:
tagname_x = param['tagname_x']
xlabel = param['xlabel']
ylabel = param['ylabel']
title = param['title']
xscale = param['xscale']
yscale = param['yscale']
convergence = param['convergence']
print("*"*80)
print("Processing tag "+tagname_x)
print("*"*80)
if True:
"""
Plotting format
"""
# Filter out errors beyond this value!
def data_filter(x, y, jobdata):
if y == None:
return True
x = float(x)
y = float(y)
if math.isnan(y):
return True
if 'l1' in tagname_y:
#if y > 1e1:
if y > 1e3:
print("Sorting out L1 data "+str(y))
return True
elif 'l2' in tagname_y:
#if y > 1e1:
if y > 1e3:
print("Sorting out L2 data "+str(y))
return True
elif 'linf' in tagname_y:
#if y > 1e2:
if y > 1e3:
print("Sorting out Linf data "+str(y))
return True
else:
raise Exception("Unknown y tag "+tagname_y)
return False
d = JobsData_GroupsPlottingScattered(
job_groups,
tagname_x,
tagname_y,
data_filter = data_filter
)
fileid = "output_plotting_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')
if True:
#
# Proper naming and sorting of each label
#
# new data dictionary
data_new = {}
for key, data in d.data.items():
# generate nice tex label
#data['label'] = pp.get_pretty_name(key)
data['label'] = key #pp.get_pretty_name(key)
key_new = pp.get_pretty_name_order(key)+'_'+key
# copy data
data_new[key_new] = copy.copy(data)
# Copy back new data table
d.data = data_new
p = Plotting_ScatteredData()
def fun(p):
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
plt.tick_params(axis='x', which='minor')
p.ax.xaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
p.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.xaxis.get_minor_ticks():
tick.label.set_fontsize(8)
plt.tick_params(axis='y', which='minor')
p.ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))
p.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))
for tick in p.ax.yaxis.get_minor_ticks():
tick.label.set_fontsize(6)
#
# Add convergence information
#
if convergence:
if 'l1' in tagname_y:
ps = [100, 1e-2]
elif 'l2' in tagname_y:
ps = [100, 5e-2]
elif 'linf' in tagname_y:
ps = [100, 1e-0]
else:
ps = [100, 1e-0]
p.add_convergence(2, ps)
annotate_text_template = "{:.1f} / {:.3f}"
p.plot(
data_plotting = d.get_data_float(),
xlabel = xlabel,
ylabel = ylabel,
title = title,
xscale = xscale,
yscale = yscale,
#annotate = True,
#annotate_each_nth_value = 3,
#annotate_fontsize = 6,
#annotate_text_template = annotate_text_template,
legend_fontsize = 8,
grid = True,
outfile = fileid+".pdf",
lambda_fun = fun,
)
print("Data plotting:")
d.print()
d.write(fileid+".csv")
print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| mit |
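The add_convergence(2, ps) call above overlays a second-order reference line; the observed order itself can be estimated from any two (timestep, error) pairs in the CSV output. A self-contained sketch of that calculation, with made-up numbers:

import math

def observed_order(dt_coarse, err_coarse, dt_fine, err_fine):
    # Assumes err ~ C * dt**p and solves for p from two runs.
    return math.log(err_coarse / err_fine) / math.log(dt_coarse / dt_fine)

# Illustrative values only: halving dt cuts the error roughly 4x, so p is close to 2.
print(observed_order(120.0, 8.0e-3, 60.0, 2.1e-3))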
equialgo/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 18 | 26777 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.utils import check_random_state
from sklearn.datasets import make_multilabel_classification
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_regression_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
def test_ridge_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
param_grid = product((1.0, 1e-2), (True, False),
('svd', 'cholesky', 'lsqr', 'sparse_cg'))
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for (alpha, intercept, solver) in param_grid:
# Ridge with explicit sample_weight
est = Ridge(alpha=alpha, fit_intercept=intercept, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
coefs = est.coef_
inter = est.intercept_
# Closed form of the weighted regularized least square
# theta = (X^T W X + alpha I)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
I = np.eye(n_features)
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
I = np.eye(n_features + 1)
I[0, 0] = 0
cf_coefs = linalg.solve(X_aug.T.dot(W).dot(X_aug) + alpha * I,
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs, cf_coefs)
else:
assert_array_almost_equal(coefs, cf_coefs[1:])
assert_almost_equal(inter, cf_coefs[0])
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
reg = Ridge(alpha=0.0)
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(reg.predict(X_test), [1., 2, 3, 4])
assert_equal(len(reg.coef_.shape), 1)
assert_equal(type(reg.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
reg.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(reg.coef_.shape), 2)
assert_equal(type(reg.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
fit_intercept = filter_ == DENSE_FILTER
if fit_intercept:
X_diabetes_ = X_diabetes - X_diabetes.mean(0)
else:
X_diabetes_ = X_diabetes
ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)
ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept)
# because fit_intercept is applied
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes, fit_intercept)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes_[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes_[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes_, y_diabetes, fit_intercept)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('neg_mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for reg in (RidgeClassifier(), RidgeClassifierCV()):
reg.fit(filter_(X_iris), y_iris)
assert_equal(reg.coef_.shape, (n_classes, n_features))
y_pred = reg.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
cv = KFold(5)
reg = RidgeClassifierCV(cv=cv)
reg.fit(filter_(X_iris), y_iris)
y_pred = reg.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5, fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3, fit_intercept=False)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def check_dense_sparse(test_func):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
yield check_dense_sparse, test_func
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd', fit_intercept=False)
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
reg = RidgeClassifier(class_weight={1: 0.001})
reg.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
reg = RidgeClassifier(class_weight='balanced')
reg.fit(X, y)
assert_array_equal(reg.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
reg = RidgeClassifier(class_weight=None)
reg.fit(X, y)
rega = RidgeClassifier(class_weight='balanced')
rega.fit(X, y)
assert_equal(len(rega.classes_), 2)
assert_array_almost_equal(reg.coef_, rega.coef_)
assert_array_almost_equal(reg.intercept_, rega.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for reg in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
reg1 = reg()
reg1.fit(iris.data, iris.target)
reg2 = reg(class_weight='balanced')
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target)
assert_almost_equal(reg1.coef_, reg2.coef_)
# Check that sample_weight and class_weight are multiplicative
reg1 = reg()
reg1.fit(iris.data, iris.target, sample_weight ** 2)
reg2 = reg(class_weight=class_weight)
reg2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(reg1.coef_, reg2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
reg = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
reg.fit(X, y)
# we give a small weights to class 1
reg = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
reg.fit(X, y)
assert_array_equal(reg.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
cv = KFold(5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
def test_ridge_fit_intercept_sparse():
X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
bias=10., random_state=42)
X_csr = sp.csr_matrix(X)
dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
dense.fit(X, y)
sparse.fit(X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
# test the solver switch and the corresponding warning
sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
assert_warns(UserWarning, sparse.fit, X_csr, y)
assert_almost_equal(dense.intercept_, sparse.intercept_)
assert_array_almost_equal(dense.coef_, sparse.coef_)
def test_errors_and_values_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
n = 5
y = rng.randn(n)
v = rng.randn(n)
Q = rng.randn(len(v), len(v))
QT_y = Q.T.dot(y)
G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
    np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values(alpha, y, v, Q, QT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_errors_and_values_svd_helper():
ridgecv = _RidgeGCV()
rng = check_random_state(42)
alpha = 1.
for n, p in zip((5, 10), (12, 6)):
y = rng.randn(n)
v = rng.randn(p)
U = rng.randn(n, p)
UT_y = U.T.dot(y)
G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
# test that helper function behaves as expected
out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, (c / G_diag) ** 2)
        np.testing.assert_array_equal(c_, c)
out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y)
np.testing.assert_array_equal(out, y - (c / G_diag))
np.testing.assert_array_equal(c_, c)
def test_ridge_classifier_no_support_multilabel():
X, y = make_multilabel_classification(n_samples=10, random_state=0)
assert_raises(ValueError, RidgeClassifier().fit, X, y)
| bsd-3-clause |
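The sample-weight tests above rest on the identity that, for the squared loss, weighting samples is equivalent to rescaling X and y by sqrt(w). A standalone sketch of that check using the same ridge_regression helper imported in the test module:

import numpy as np
from sklearn.linear_model import ridge_regression

rng = np.random.RandomState(0)
X, y = rng.randn(6, 5), rng.randn(6)
w = 1.0 + rng.rand(6)

coef_weighted = ridge_regression(X, y, alpha=1.0, sample_weight=w,
                                 solver='cholesky')
coef_rescaled = ridge_regression(X * np.sqrt(w)[:, np.newaxis], y * np.sqrt(w),
                                 alpha=1.0, solver='cholesky')
print(np.allclose(coef_weighted, coef_rescaled))  # expected: True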
victorbergelin/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
raspearsy/bme590hrm | test_rhythmdetectors.py | 1 | 4182 | from ecgmeasure import ECGMeasure
import pandas as pd
# need test dataframe test_hr1 that has 3 columns and 3 rows, where each successive row will
# be tagged as no arrythmia, bradycardia, and tachycardia
def get_test_hr1(a=60, b=60, c=60):
""".. function:: get_test_hr1()
Create test dataframe.
"""
initial_messages = ['Healthy... for now', 'Healthy... for now', 'Healthy... for now']
test_hr1 = pd.DataFrame({'B/T': initial_messages, 'time': [0, 5, 10], 'HeartRate': [a, b, c]})
return test_hr1
def test_detect_rhythm_brady():
""".. function:: test_detect_rhythm_brady()
Test output of bradydetector when threshold is set to 50.
"""
bm_b = ECGMeasure(file_bool=True)
bm_b.data = get_test_hr1(a=20)
bm_b.detect_rhythm()
output_messages = ['Bradycardia Detected', 'Healthy... for now', 'Healthy... for now']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [20, 60, 60]}
assert (bm_b.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_brady2():
""".. function:: test_detect_rhythm_brady2()
Test bradydetector when threshold is set to 100.
"""
bm_b2 = ECGMeasure(file_bool=True)
bm_b2.data = get_test_hr1()
bm_b2.change_brady_threshold(brady_threshold=100)
bm_b2.detect_rhythm()
output_messages = ['Bradycardia Detected', 'Bradycardia Detected', 'Bradycardia Detected']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [60, 60, 60]}
assert (bm_b2.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_brady3():
""".. function:: test_detect_rhythm_brady3()
Test bradydetector when threshold is set to 0.
"""
bm_b3 = ECGMeasure(file_bool=True)
bm_b3.data = get_test_hr1()
bm_b3.change_brady_threshold(brady_threshold=0)
bm_b3.detect_rhythm()
output_messages = ['Healthy... for now', 'Healthy... for now', 'Healthy... for now']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [60, 60, 60]}
assert (bm_b3.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_tachy():
""".. function:: test_detect_rhythm_tachy()
Test output of tachydetector when threshold is set to 140.
"""
bm_t = ECGMeasure(file_bool=True)
bm_t.data = get_test_hr1(c=200)
bm_t.detect_rhythm()
output_messages = ['Healthy... for now', 'Healthy... for now', 'Tachycardia Detected']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [60, 60, 200]}
assert (bm_t.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_tachy2():
""".. function:: test_detect_rhythm_tachy2()
Test tachydetector when threshold is set to 1000.
"""
bm_t2 = ECGMeasure(file_bool=True)
bm_t2.data = get_test_hr1()
bm_t2.change_tachy_threshold(tachy_threshold=1000)
bm_t2.detect_rhythm()
output_messages = ['Healthy... for now', 'Healthy... for now', 'Healthy... for now']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [60, 60, 60]}
assert (bm_t2.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_tachy3():
""".. function:: test_detect_rhythm_tachy3()
Test tachydetector when threshold is set to 0.
"""
bm_t3 = ECGMeasure(file_bool=True)
bm_t3.data = get_test_hr1()
bm_t3.change_tachy_threshold(tachy_threshold=0)
bm_t3.detect_rhythm()
output_messages = ['Tachycardia Detected', 'Tachycardia Detected', 'Tachycardia Detected']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [60, 60, 60]}
assert (bm_t3.data['B/T'] == output_hr['B/T']).all()
def test_detect_rhythm_bradytachy():
""".. function:: test_detect_rhythm_bradytachy()
Test output of tachy/bradydetector when brady threshold is set to 50 and tachy to 100.
"""
bm_bt = ECGMeasure(file_bool=True)
bm_bt.data = get_test_hr1(a=20, c=200)
bm_bt.detect_rhythm()
output_messages = ['Bradycardia Detected', 'Healthy... for now', 'Tachycardia Detected']
output_hr = {'B/T': output_messages, 'time': [0, 5, 10], 'HeartRate': [20, 60, 200]}
assert (bm_bt.data['B/T'] == output_hr['B/T']).all()
| mit |
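The tests above pin down the behaviour expected of detect_rhythm(): rates below the bradycardia threshold (50 bpm by default, per the first test) are flagged, rates above the tachycardia threshold (140 bpm) are flagged, and everything else stays 'Healthy... for now'. A minimal standalone sketch of that rule follows; the function name and defaults are assumptions, not the project's API.

def classify_rate(hr, brady_threshold=50, tachy_threshold=140):
    # Hypothetical helper mirroring what the tests expect of detect_rhythm().
    if hr < brady_threshold:
        return 'Bradycardia Detected'
    if hr > tachy_threshold:
        return 'Tachycardia Detected'
    return 'Healthy... for now'

assert classify_rate(20) == 'Bradycardia Detected'
assert classify_rate(60) == 'Healthy... for now'
assert classify_rate(200) == 'Tachycardia Detected'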
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/networkx/drawing/nx_pylab.py | 10 | 30226 | """
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
hold : bool, optional
Set the Matplotlib hold state. If True subsequent draw
commands will be added to the current axes.
kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
b = plt.ishold()
# allow callers to override the hold state by passing hold=True|False
h = kwds.pop('hold', None)
if h is not None:
plt.hold(h)
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
plt.hold(b)
raise
plt.hold(b)
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See networkx.layout for functions that compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
       Line width of symbol border (default=1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
>>> limits=plt.axis('off') # turn of axis
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
       Line width of symbol border (default=1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = G.nodes()
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
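# Illustrative sketch (not part of the original module): numeric node values
# mapped to colors through ``cmap``/``vmin``/``vmax`` as described in the
# docstring above. The graph and values are hypothetical.
# >>> G = nx.path_graph(5)
# >>> pos = nx.spring_layout(G)
# >>> nodes = nx.draw_networkx_nodes(G, pos, node_color=[0, 1, 2, 3, 4],
# ...                                cmap=plt.cm.Blues, vmin=0, vmax=4)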
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
        Edge color. Can be a single color format string (default='k'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
        Edge line style (default='solid') (solid|dashed|dotted|dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = G.edges()
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
            elif dy == 0:  # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
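# Illustrative sketch (not part of the original module): numeric edge values
# mapped to colors through ``edge_cmap`` as described in the docstring above.
# The weights used here are hypothetical.
# >>> G = nx.path_graph(4)
# >>> pos = nx.spring_layout(G)
# >>> edges = nx.draw_networkx_edges(G, pos, edge_color=[1.0, 2.0, 3.0],
# ...                                edge_cmap=plt.cm.Blues, width=2)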
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
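# Illustrative sketch (not part of the original module): supplying a custom
# ``labels`` dictionary keyed by node, as described in the docstring above.
# The graph and labels are hypothetical.
# >>> G = nx.path_graph(3)
# >>> pos = nx.spring_layout(G)
# >>> text = nx.draw_networkx_labels(G, pos, labels={0: 'a', 1: 'b', 2: 'c'},
# ...                                font_size=16)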
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
        Font size for text labels (default=10)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.github.io/documentation/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
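# Illustrative sketch (not part of the original module): only edges present in
# the ``edge_labels`` dictionary (keyed by node two-tuples) are labelled. The
# graph and labels are hypothetical.
# >>> G = nx.path_graph(3)
# >>> pos = nx.spring_layout(G)
# >>> text = nx.draw_networkx_edge_labels(G, pos,
# ...                                     edge_labels={(0, 1): 'x', (1, 2): 'y'})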
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
"""Draw networkx graph with graphviz layout.
Parameters
----------
G : graph
A networkx graph
prog : string, optional
Name of Graphviz layout program
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
"""
pos = nx.drawing.graphviz_layout(G, prog)
draw(G, pos, **kwargs)
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| bsd-2-clause |
adammenges/statsmodels | statsmodels/tools/tools.py | 25 | 18536 | '''
Utility functions models code
'''
from statsmodels.compat.python import reduce, lzip, lmap, asstr2, range
import numpy as np
import numpy.lib.recfunctions as nprf
import numpy.linalg as L
from scipy.linalg import svdvals
from statsmodels.datasets import webuse
from statsmodels.tools.data import _is_using_pandas
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
def _make_dictnames(tmp_arr, offset=0):
"""
Helper function to create a dictionary mapping a column number
to the name in tmp_arr.
"""
col_map = {}
for i, col_name in enumerate(tmp_arr):
col_map.update({i+offset : col_name})
return col_map
def drop_missing(Y, X=None, axis=1):
"""
    Returns views on the arrays Y and X where missing observations are dropped.
    Parameters
    ----------
    Y : array-like
    X : array-like, optional
    axis : int
        Axis along which to look for missing observations. Default is 1, ie.,
        observations in rows.
    Returns
    -------
    Y : array
        All rows of Y for which neither Y nor X contains missing observations.
    X : array
        The corresponding rows of X. Only returned when X is not None.
Notes
-----
If either Y or X is 1d, it is reshaped to be 2d.
"""
Y = np.asarray(Y)
if Y.ndim == 1:
Y = Y[:, None]
if X is not None:
X = np.array(X)
if X.ndim == 1:
X = X[:, None]
keepidx = np.logical_and(~np.isnan(Y).any(axis),
~np.isnan(X).any(axis))
return Y[keepidx], X[keepidx]
else:
keepidx = ~np.isnan(Y).any(axis)
return Y[keepidx]
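# Illustrative sketch (not part of the original module): rows containing a NaN
# in either Y or X are dropped together. The arrays below are hypothetical.
# >>> Y = np.array([1., np.nan, 3.])
# >>> X = np.array([[1., 2.], [3., 4.], [np.nan, 6.]])
# >>> y_clean, x_clean = drop_missing(Y, X)   # keeps only the first row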
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
# TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
'''
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, or array. This can be either
a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column that contains the variable. For all
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
        Whether or not to keep the categorical variable in the returned matrix.
Returns
--------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
    This returns a dummy variable for every distinct value of the categorical
    variable. If a structured array or recarray is provided, the name of each
    new variable is the old variable name, an underscore, and the category
    name. So if a variable 'vote' had answers 'yes' or 'no', the returned
    array would have two new variables, 'vote_yes' and 'vote_no'. There is
    currently no name checking.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
Univariate examples
>>> import string
>>> string_var = [string.lowercase[0:5], string.lowercase[5:10], \
string.lowercase[10:15], string.lowercase[15:20], \
string.lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
'''
if isinstance(col, (list, tuple)):
try:
assert len(col) == 1
col = col[0]
except:
raise ValueError("Can only convert one column at a time")
# TODO: add a NameValidator function
# catch recarrays and structured arrays
if data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, int):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
except:
col = 'var'
# TODO: the above needs to be made robust because there could be many
    # var_yes, var_no variables for instance.
tmp_arr = [col + '_' + item for item in tmp_arr]
# TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
dt = lzip(tmp_arr, [tmp_dummy.dtype.str]*len(tmp_arr))
# preserve array type
return np.array(lmap(tuple, tmp_dummy.tolist()),
dtype=dt).view(type(data))
data = nprf.drop_fields(data, col, usemask=False,
asrecarray=type(data) is np.recarray)
data = nprf.append_fields(data, tmp_arr, data=tmp_dummy,
usemask=False,
asrecarray=type(data) is np.recarray)
return data
# handle ndarrays and catch array-like for an error
elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
if not isinstance(data, np.ndarray):
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, int):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:, col])
tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:, None] == data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset=1)
return data, col_map
return data
else:
raise IndexError("The index %s is not understood" % col)
def _series_add_constant(data, prepend, has_constant):
const = np.ones_like(data)
if data.var() == 0:
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
if not prepend:
columns = [data.name, 'const']
else:
columns = ['const', data.name]
results = DataFrame({data.name : data, 'const' : const}, columns=columns)
return results
def _dataframe_add_constant(data, prepend, has_constant):
# check for const.
if np.any(data.var(0) == 0):
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
if prepend:
data.insert(0, 'const', 1)
else:
data['const'] = 1
return data
def _pandas_add_constant(data, prepend, has_constant):
from pandas import Series
if isinstance(data, Series):
return _series_add_constant(data, prepend, has_constant)
else:
return _dataframe_add_constant(data, prepend, has_constant)
# TODO: add an axis argument to this for sysreg
def add_constant(data, prepend=True, has_constant='skip'):
"""
    Adds a column of ones to an array (prepended by default).
Parameters
----------
data : array-like
`data` is the column-ordered design matrix
prepend : bool
        If True, the constant is prepended (first column) rather than appended.
has_constant : str {'raise', 'add', 'skip'}
Behavior if `data` already has a constant. The default will return
data without adding another constant. If 'raise', will raise an
error if a constant is present. Using 'add' will duplicate the
constant, if one is present. Has no effect for structured or
recarrays. There is no checking for a constant in this case.
Returns
-------
data : array
The original array with a constant (column of ones) as the first or
last column.
"""
if _is_using_pandas(data, None):
# work on a copy
return _pandas_add_constant(data.copy(), prepend, has_constant)
else:
data = np.asarray(data)
if not data.dtype.names:
var0 = data.var(0) == 0
if np.any(var0):
if has_constant == 'raise':
raise ValueError("data already contains a constant.")
elif has_constant == 'skip':
return data
elif has_constant == 'add':
pass
else:
raise ValueError("Option {0} not understood for "
"has_constant.".format(has_constant))
data = np.column_stack((data, np.ones((data.shape[0], 1))))
if prepend:
return np.roll(data, 1, 1)
else:
return_rec = data.__class__ is np.recarray
if prepend:
ones = np.ones((data.shape[0], 1), dtype=[('const', float)])
data = nprf.append_fields(ones, data.dtype.names,
[data[i] for i in data.dtype.names],
usemask=False, asrecarray=return_rec)
else:
data = nprf.append_fields(data, 'const', np.ones(data.shape[0]),
usemask=False, asrecarray=return_rec)
return data
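# Illustrative sketch (not part of the original module): the 'const' column is
# prepended by default and appended with prepend=False. The array is
# hypothetical.
# >>> x = np.arange(1., 7.).reshape(3, 2)
# >>> add_constant(x)                  # ones in the first column
# >>> add_constant(x, prepend=False)   # ones in the last column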
def isestimable(C, D):
""" True if (Q, P) contrast `C` is estimable for (N, P) design `D`
From an Q x P contrast matrix `C` and an N x P design matrix `D`, checks if
the contrast `C` is estimable by looking at the rank of ``vstack([C,D])``
and verifying it is the same as the rank of `D`.
Parameters
----------
C : (Q, P) array-like
        contrast matrix. If `C` is 1 dimensional, assume shape (1, P)
    D : (N, P) array-like
design matrix
Returns
-------
tf : bool
True if the contrast `C` is estimable on design `D`
Examples
--------
>>> D = np.array([[1, 1, 1, 0, 0, 0],
... [0, 0, 0, 1, 1, 1],
... [1, 1, 1, 1, 1, 1]]).T
>>> isestimable([1, 0, 0], D)
False
>>> isestimable([1, -1, 0], D)
True
"""
C = np.asarray(C)
D = np.asarray(D)
if C.ndim == 1:
C = C[None, :]
if C.shape[1] != D.shape[1]:
raise ValueError('Contrast should have %d columns' % D.shape[1])
new = np.vstack([C, D])
if np_matrix_rank(new) != np_matrix_rank(D):
return False
return True
def pinv_extended(X, rcond=1e-15):
"""
Return the pinv of an array X as well as the singular values
used in computation.
Code adapted from numpy.
"""
X = np.asarray(X)
X = X.conjugate()
u, s, vt = np.linalg.svd(X, 0)
s_orig = np.copy(s)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond * np.maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = np.dot(np.transpose(vt), np.multiply(s[:, np.core.newaxis],
np.transpose(u)))
return res, s_orig
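# Illustrative sketch (not part of the original module): for a real matrix the
# first return value should agree with numpy's pinv, while the second gives the
# unmodified singular values. The matrix here is hypothetical.
# >>> X = np.random.randn(5, 3)
# >>> pinv, singular_values = pinv_extended(X)
# >>> np.allclose(pinv, np.linalg.pinv(X))    # expected to be True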
def recipr(X):
"""
Return the reciprocal of an array, setting all entries less than or
equal to 0 to 0. Therefore, it presumes that X should be positive in
general.
"""
x = np.maximum(np.asarray(X).astype(np.float64), 0)
return np.greater(x, 0.) / (x + np.less_equal(x, 0.))
def recipr0(X):
"""
Return the reciprocal of an array, setting all entries equal to 0
as 0. It does not assume that X should be positive in
general.
"""
test = np.equal(np.asarray(X), 0)
return np.where(test, 0, 1. / X)
def clean0(matrix):
"""
Erase columns of zeros: can save some time in pseudoinverse.
"""
colsum = np.add.reduce(matrix**2, 0)
val = [matrix[:, i] for i in np.flatnonzero(colsum)]
return np.array(np.transpose(val))
def rank(X, cond=1.0e-12):
"""
Return the rank of a matrix X based on its generalized inverse,
not the SVD.
"""
from warnings import warn
warn("rank is deprecated and will be removed in 0.7."
" Use np.linalg.matrix_rank instead.", FutureWarning)
X = np.asarray(X)
if len(X.shape) == 2:
D = svdvals(X)
return int(np.add.reduce(np.greater(D / D.max(),
cond).astype(np.int32)))
else:
return int(not np.alltrue(np.equal(X, 0.)))
def fullrank(X, r=None):
"""
Return a matrix whose column span is the same as X.
If the rank of X is known it can be specified as r -- no check
is made to ensure that this really is the rank of X.
"""
if r is None:
r = np_matrix_rank(X)
V, D, U = L.svd(X, full_matrices=0)
order = np.argsort(D)
order = order[::-1]
value = []
for i in range(r):
value.append(V[:, order[i]])
return np.asarray(np.transpose(value)).astype(np.float64)
def unsqueeze(data, axis, oldshape):
"""
Unsqueeze a collapsed array
>>> from numpy import mean
>>> from numpy.random import standard_normal
>>> x = standard_normal((3,4,5))
>>> m = mean(x, axis=1)
>>> m.shape
(3, 5)
>>> m = unsqueeze(m, 1, x.shape)
>>> m.shape
(3, 1, 5)
>>>
"""
newshape = list(oldshape)
newshape[axis] = 1
return data.reshape(newshape)
def chain_dot(*arrs):
"""
Returns the dot product of the given matrices.
Parameters
----------
arrs: argument list of ndarray
Returns
-------
Dot product of all arguments.
Examples
--------
>>> import numpy as np
>>> from statsmodels.tools import chain_dot
>>> A = np.arange(1,13).reshape(3,4)
>>> B = np.arange(3,15).reshape(4,3)
>>> C = np.arange(5,8).reshape(3,1)
>>> chain_dot(A,B,C)
array([[1820],
[4300],
[6780]])
"""
return reduce(lambda x, y: np.dot(y, x), arrs[::-1])
def nan_dot(A, B):
"""
Returns np.dot(left_matrix, right_matrix) with the convention that
nan * 0 = 0 and nan * x = nan if x != 0.
Parameters
----------
A, B : np.ndarrays
"""
# Find out who should be nan due to nan * nonzero
should_be_nan_1 = np.dot(np.isnan(A), (B != 0))
should_be_nan_2 = np.dot((A != 0), np.isnan(B))
should_be_nan = should_be_nan_1 + should_be_nan_2
# Multiply after setting all nan to 0
# This is what happens if there were no nan * nonzero conflicts
C = np.dot(np.nan_to_num(A), np.nan_to_num(B))
C[should_be_nan] = np.nan
return C
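# Illustrative sketch (not part of the original module): a nan multiplied by a
# structural zero does not propagate, while nan times a nonzero entry does.
# The matrices below are hypothetical.
# >>> A = np.array([[1., np.nan], [0., 2.]])
# >>> B = np.array([[1., 0.], [3., 1.]])
# >>> nan_dot(A, B)     # first row is all nan, second row is [6., 2.]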
def maybe_unwrap_results(results):
"""
Gets raw results back from wrapped results.
Can be used in plotting functions or other post-estimation type
routines.
"""
return getattr(results, '_results', results)
class Bunch(dict):
"""
Returns a dict-like object with keys accessible via attribute lookup.
"""
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
webuse = np.deprecate(webuse,
old_name='statsmodels.tools.tools.webuse',
new_name='statsmodels.datasets.webuse',
message='webuse will be removed from the tools '
'namespace in the 0.7.0 release. Please use the'
' new import.')
| bsd-3-clause |
mjudsp/Tsallis | sklearn/pipeline.py | 14 | 21389 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s"
% (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform."
                                " '%s' (type %s) doesn't." % (t, type(t)))
if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit. "
                            "'%s' (type %s) doesn't."
                            % (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies
``inverse_transform`` in inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
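# Illustrative sketch (not part of the original module): fit parameters are
# routed to individual steps with the '<step>__<param>' convention handled by
# _pre_transform above. X, y and w are hypothetical placeholders.
# >>> from sklearn.preprocessing import StandardScaler
# >>> from sklearn.svm import SVC
# >>> pipe = Pipeline([('scale', StandardScaler()), ('svc', SVC())])
# >>> pipe.fit(X, y, svc__sample_weight=w)   # forwarded to SVC.fit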
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
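# Illustrative sketch (not part of the original module): duplicate estimator
# types receive numbered names, which make_pipeline and make_union rely on.
# >>> from sklearn.decomposition import PCA
# >>> _name_estimators([PCA(), PCA()])
# [('pca-1', PCA(...)), ('pca-2', PCA(...))]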
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB(priors=None))])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca',
PCA(copy=True, iterated_power=4,
n_components=None, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
radiasoft/opal | tests/fields/electrostatic/dfes.py | 1 | 1717 | __author__ = 'swebb'
try:
from opal.fields import discrete_fourier_electrostatic as dfes
from matplotlib import pyplot as plt
import numpy as np
dimensions = 2
dk = 0.1
nmodes = 2
pd = {}
pd['dimensions'] = dimensions
pd['delta k'] = np.array([dk]*dimensions)
pd['n_modes'] = np.array([nmodes]*dimensions)
es_solver = dfes.discrete_fourier_electrostatic(pd)
mykvectors = es_solver.get_kvectors()
nkvecs = np.shape(mykvectors)[0]
rho = np.zeros(nkvecs)
ksquared = np.zeros(nkvecs)
for idx in range(0, nkvecs):
ksquared[idx] = np.dot(mykvectors[idx], mykvectors[idx])
rho[idx] = 1.*idx
# This is an unphysical rho, but is a simple test
# that phi computes the right arithmetic
phi_expected = np.array([ -0.,
-12.73239545,
-25.46479089,
-21.22065908,
-50.92958179,
-318.30988618,
-381.97186342,
-89.12676813,
-101.85916358,
-572.95779513,
-636.61977237,
-140.05634992,
-84.88263632,
-165.52114082,
-178.25353626,
-106.10329539])
phi = es_solver.compute_fields(rho)
error = abs(phi - phi_expected)
for idx in range(0,nkvecs):
if error[idx] > 1.e-8:
failed = True
except:
print 'dfes failed tests'
raise | apache-2.0 |
keflavich/APEX_CMZ_H2CO | analysis/constrain_parameters.py | 1 | 21314 | raise "Use https://github.com/keflavich/h2co_modeling/blob/master/h2co_modeling/constrain_parameters.py instead"
"""
Functions for fitting temperature (and density and column) from the line ratio
plus whatever other constraints are available
"""
import inspect
import time
import collections
import warnings
import os
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy import stats
from astropy import units as u
from astropy import log
import pylab as pl
import matplotlib
from astropy.io import fits
from astropy.utils.console import ProgressBar
from h2co_modeling.paraH2COmodel import generic_paraH2COmodel
short_mapping = {'dens': 'density',
'col': 'column',
'tem': 'temperature'}
chi2_mapping = {'X': 'Abundance',
'ff1': "Filling Factor $3_{0,3}-2_{0,2}$",
'ff2': "Filling Factor $3_{2,1}-2_{2,0}$",
'r321303': "Ratio $3_{0,3}-2_{0,2}$ / $3_{2,1}-2_{2,0}$",
'dens': "Density $n(H_2)$ cm$^{-3}$",
'h2': "Column $N(H_2)$ cm$^{-2}$",
}
class paraH2COmodel(generic_paraH2COmodel):
def __init__(self, tbackground=2.73, gridsize=[250.,101.,100.]):
t0 = time.time()
from pyspeckit_fitting import (texgrid303, taugrid303, texgrid321, taugrid321,
texgrid322, taugrid322, hdr)
# The grid was computed with a linewidth (or gradient) 5 km/s/pc
self.grid_linewidth = 5.0
t1 = time.time()
log.debug("Loading grids took {0:0.1f} seconds".format(t1-t0))
self.texgrid303 = texgrid303
self.taugrid303 = taugrid303
self.texgrid321 = texgrid321
self.taugrid321 = taugrid321
self.texgrid322 = texgrid322
self.taugrid322 = taugrid322
self.hdr = hdr
self.Tbackground = tbackground
self.tline303a = ((1.0-np.exp(-np.array(self.taugrid303))) *
(self.texgrid303-self.Tbackground))
self.tline321a = ((1.0-np.exp(-np.array(self.taugrid321))) *
(self.texgrid321-self.Tbackground))
self.tline322a = ((1.0-np.exp(-np.array(self.taugrid322))) *
(self.texgrid322-self.Tbackground))
zinds,yinds,xinds = np.indices(self.tline303a.shape)
upsample_factor = np.array([gridsize[0]/self.tline303a.shape[0], # temperature
gridsize[1]/self.tline303a.shape[1], # density
gridsize[2]/self.tline303a.shape[2]], # column
dtype='float')
uzinds,uyinds,uxinds = upsinds = np.indices([int(x*us)
for x,us in zip(self.tline303a.shape,
upsample_factor)],
dtype='float')
self.tline303 = map_coordinates(self.tline303a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline321 = map_coordinates(self.tline321a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline322 = map_coordinates(self.tline322a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline = {303: self.tline303,
321: self.tline321,
322: self.tline322}
assert self.hdr['CTYPE2'].strip() == 'LOG-DENS'
assert self.hdr['CTYPE1'].strip() == 'LOG-COLU'
self.columnarr = ((uxinds + self.hdr['CRPIX1']-1)*self.hdr['CDELT1'] /
float(upsample_factor[2])+self.hdr['CRVAL1']) # log column
self.densityarr = ((uyinds + self.hdr['CRPIX2']-1)*self.hdr['CDELT2'] /
float(upsample_factor[1])+self.hdr['CRVAL2']) # log density
self.temparr = ((uzinds + self.hdr['CRPIX3']-1)*self.hdr['CDELT3'] /
float(upsample_factor[0])+self.hdr['CRVAL3']) # lin temperature
self.drange = [self.densityarr.min(), self.densityarr.max()]
self.crange = [self.columnarr.min(), self.columnarr.max()]
self.trange = [self.temparr.min(), self.temparr.max()]
self.darr = self.densityarr[0,:,0]
self.carr = self.columnarr[0,0,:]
self.tarr = self.temparr[:,0,0]
self.axes = {'dens': self.darr,
'col': self.carr,
'tem': self.tarr}
self.labels = {'dens': 'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]',
'col': 'p-H$_2$CO\n[log cm$^{-2}$/(km s$^{-1}$ pc)]',
'tem': 'Temperature [K]'}
# While the individual lines are subject to filling factor uncertainties, the
# ratio is not.
self.modelratio1 = self.tline321/self.tline303
self.modelratio2 = self.tline322/self.tline321
self.model_logabundance = np.log10(10**self.columnarr / u.pc.to(u.cm) /
10**self.densityarr)
t2 = time.time()
log.debug("Grid initialization took {0:0.1f} seconds total,"
" {1:0.1f} since loading grids.".format(t2-t0,t2-t1))
    def list_parameters(self):
return ['taline303', 'etaline303', 'taline321', 'etaline321',
'taline322', 'etaline322', 'logabundance', 'elogabundance',
'logh2column', 'elogh2column', 'ratio321303', 'eratio321303',
'ratio321322', 'eratio321322', 'linewidth']
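    # Illustrative sketch (not part of the original class): the grid upsampling
    # in __init__ relies on scipy's map_coordinates; dividing an upsampled
    # index grid by the upsample factor samples the coarse grid at fractional
    # positions. The toy grid below is hypothetical.
    # >>> coarse = np.arange(12.).reshape(3, 4)
    # >>> inds = np.indices((6, 8), dtype='float')
    # >>> fine = map_coordinates(coarse, inds / 2.0, mode='nearest')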
def set_constraints_fromrow(self, row, **kwargs):
mapping = {'e321':'etaline321',
'Smean321':'taline321',
'Smean303':'taline303',
'er321303':'eratio321303',
'eratio321303':'eratio321303',
'e303':'etaline303',
'r321303':'ratio321303',
'ratio321303':'ratio321303',
'r321303':'ratio321303',
'er321303':'eratio321303',
'logabundance':'logabundance',
'elogabundance':'elogabundance',
'logh2column':'logh2column',
'elogh2column':'elogh2column',
'dustmindens':'linmindens',
'v_rms':'linewidth',
}
pars = {mapping[k]: row[k] for k in row.colnames if k in mapping}
pars.update(**kwargs)
self.set_constraints(**pars)
def set_constraints(self,
taline303=None, etaline303=None,
taline321=None, etaline321=None,
taline322=None, etaline322=None,
logabundance=None, elogabundance=None,
logh2column=None, elogh2column=None,
ratio321303=None, eratio321303=None,
ratio321322=None, eratio321322=None,
linmindens=None,
mindens=None, emindens=0.2,
linewidth=None):
argspec=inspect.getargvalues(inspect.currentframe())
for arg in argspec.args:
if argspec.locals[arg] is not None:
setattr(self, arg, argspec.locals[arg])
self.chi2_X = (self.chi2_abundance(logabundance, elogabundance)
if not any(arg is None for arg in (logabundance,
elogabundance))
else 0)
self.chi2_h2 = (self.chi2_column(logh2column, elogh2column,
logabundance, linewidth)
if not
any(arg is None for arg in (logabundance, logh2column,
elogh2column, linewidth))
else 0)
self.chi2_ff1 = (self.chi2_fillingfactor(taline303, etaline303, 303)
if not any(arg is None for arg in (taline303,
etaline303))
else 0)
self.chi2_ff2 = (self.chi2_fillingfactor(taline321, etaline321, 321)
if not any(arg is None for arg in (taline321,
etaline321))
else 0)
self.chi2_r321303 = (self.grid_getmatch_321to303(ratio321303,
eratio321303)
if not any(arg is None for arg in (ratio321303,
eratio321303))
else 0)
if np.all(~np.isfinite(self.chi2_r321303)):
self.chi2_r321303 = 0
self.chi2_r321322 = (self.grid_getmatch_321to303(ratio321322,
eratio321322)
if not any(arg is None for arg in (ratio321322,
eratio321322))
else 0)
if np.all(~np.isfinite(self.chi2_r321322)):
self.chi2_r321322 = 0
if linmindens is not None:
if mindens is not None:
raise ValueError("Both linmindens and logmindens were set.")
mindens = np.log10(linmindens)
if mindens is not None:
self.chi2_dens = (((self.densityarr - mindens)/emindens)**2
* (self.densityarr < (mindens)))
else:
self.chi2_dens = 0
self.compute_chi2_fromcomponents()
def compute_chi2_fromcomponents(self):
"""
Compute the total chi2 from the individual chi2 components
"""
self._parconstraints = None # not determined until get_parconstraints run
self.chi2 = (self.chi2_X + self.chi2_h2 + self.chi2_ff1 + self.chi2_ff2
+ self.chi2_r321322 + self.chi2_r321303 + self.chi2_dens)
def parplot(self, par1='col', par2='dens', nlevs=5, levels=None,
colors=[(0.5,0,0), (0.75,0,0), (1.0,0,0), (1.0,0.25,0), (0.75,0.5,0)],
colorsf=[0.0, 0.33, 0.66, 1.0, 'w']):
cdict = {x: [(0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)]
for x in ('red','green','blue')}
cdict['blue'] = [(0.0, 1., 1.), (1.0, 1.0, 1.0)]
cm = matplotlib.colors.LinearSegmentedColormap('mycm', cdict)
colorsf = [cm(float(ii)) if isinstance(ii, (float,int))
else ii
for ii in colorsf]
xax = self.axes[par1]
yax = self.axes[par2]
xlabel = self.labels[par1]
ylabel = self.labels[par2]
amapping = {('col','dens'): 0,
('dens','tem'): 2,
('col','tem'): 1}
if (par1,par2) in amapping:
axis = amapping[(par1,par2)]
swaps = (0,0)
elif (par2,par1) in amapping:
axis = amapping[(par2,par1)]
swaps = (0,1)
if levels is None:
levels = ([0]+[(stats.norm.cdf(ii)-stats.norm.cdf(-ii))
for ii in range(1,nlevs)]+[1])
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par1])]
ymaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par2])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par1])]
yexpect = self.parconstraints['expected_{0}'.format(short_mapping[par2])]
fig = pl.gcf()
fig.clf()
ax1 = pl.subplot(2,2,1)
if 'chi2_r321303' in self.individual_likelihoods:
like = (self.individual_likelihoods['chi2_r321303'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
if self.chi2_r321322 is not 0:
like = cdf_of_like(self.individual_likelihoods['chi2_r321322'])
pl.contour(xax, yax, like.sum(axis=axis).swapaxes(*swaps),
levels=levels,
cmap=pl.cm.bone)
pl.title("Ratio $3_{0,3}-2_{0,2}/3_{2,1}-2_{2,0}$")
ax4 = pl.subplot(2,2,2)
if hasattr(self.chi2_X, 'size'):
like = self.individual_likelihoods['chi2_X']
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("log(p-H$_2$CO/H$_2$) "
"$= {0:0.1f}\pm{1:0.1f}$".format(self.logabundance,
self.elogabundance))
ax3 = pl.subplot(2,2,3)
if hasattr(self.chi2_h2, 'size'):
like = (self.individual_likelihoods['chi2_h2'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("Total log$(N(\\mathrm{{H}}_2))$ ")
# "= {0:0.1f}\pm{1:0.1f}$".format(self.logh2column,
# self.elogh2column))
ax5 = pl.subplot(2,2,4)
if hasattr(self.chi2_ff1, 'size'):
cdict = {x: [(0.0, 0.5, 0.5),
(1.0, 0.0, 0.0)]
for x in ('red','green','blue')}
cdict['green'] = [(0, 0.5, 0.5), (1,1,1)]
cdict['red'] = [(0, 0.5, 0.5), (1,0.7,0.7)]
cdict['blue'] = [(0, 0.0, 0.0), (1,0,0)]
#cdict['alpha'] = [(0.0, 0.0, 0.0), (1.0, 0.3, 0.3)]
darker = matplotlib.colors.LinearSegmentedColormap('darker', cdict)
like = (self.individual_likelihoods['chi2_ff1'])
plim = cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps)
pl.contour(xax, yax, plim, levels=levels,
cmap=darker, zorder=5)
if hasattr(self.chi2_dens, 'size'):
like = (self.individual_likelihoods['chi2_dens'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
#if hasattr(self, 'taline303'):
# ff1_mask = (self.tline303 < 10*self.taline303)
# pl.contour(xax, yax, ff1_mask.max(axis=axis).swapaxes(*swaps),
# levels=[0.5], colors='k')
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
#pl.contour(xax, yax, (tline303 < 100*par1).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k')
#pl.contour(xax, yax, (tline321 < 10*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.contour(xax, yax, (tline321 < 100*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.title("Line Brightness + $ff\leq1$")
pl.title("Minimum Density & $ff$")
fig.text(0.05, 0.5, ylabel, horizontalalignment='center',
verticalalignment='center',
rotation='vertical', transform=fig.transFigure)
fig.text(0.5, 0.02, xlabel, horizontalalignment='center', transform=fig.transFigure)
if par1 == 'col':
for ss in range(1,5):
ax = pl.subplot(2,2,ss)
ax.xaxis.set_ticks(np.arange(self.carr.min(), self.carr.max()))
pl.subplots_adjust(wspace=0.25, hspace=0.45)
def parplot1d(self, par='col', levels=None, clf=True,
legend=True, legendfontsize=14):
xax = self.axes[par]
xlabel = self.labels[par]
amapping = {'col':(2,(0,1)),
'dens':(1,(0,2)),
'tem':(0,(1,2))}
axis,axes = amapping[par]
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par])]
like = self.likelihood.sum(axis=axes)
like /= like.sum()
inds_cdf = np.argsort(like)
cdf = like[inds_cdf]
fig = pl.gcf()
if clf:
fig.clf()
ax = fig.gca()
ax.plot(xax, like, 'k-', label='Posterior')
for key in self.individual_likelihoods:
if key in ('chi2','_chi2'):
continue # already done
ilike = self.individual_likelihoods[key].sum(axis=axes)
ilike /= ilike.sum()
ax.plot(xax, ilike, label=chi2_mapping[key.replace("chi2_","")])
ax.vlines((xmaxlike,), 0, like.max(), linestyle='--', color='r',
label='Maximum Likelihood')
ax.vlines((xexpect,), 0, like.max(), linestyle='--', color='b',
label='E[{0}]'.format(xlabel))
xexpect_v2 = (like*xax).sum()/like.sum()
ax.vlines((xexpect_v2,), 0, like.max(), linestyle='--', color='c',
zorder=-1)
print("par:{4} xmaxlike: {0}, xexpect: {1}, xexpect_v2: {2},"
"maxlike: {3}, diff:{5}"
.format(xmaxlike, xexpect, xexpect_v2, like.max(), par,
xexpect-xmaxlike))
if levels is not None:
if not isinstance(levels, collections.Iterable):
levels = [levels]
cdf_inds = np.argsort(like)
ppf = 1-like[cdf_inds].cumsum()
cutoff_likes = [like[cdf_inds[np.argmin(np.abs(ppf-lev))]]
for lev in levels]
for fillind,cutoff in enumerate(sorted(cutoff_likes)):
selection = like > cutoff
ax.fill_between(xax[selection], like[selection]*0,
like[selection], alpha=0.1, zorder=fillind-20)
if np.abs(like[selection].sum() - levels[0]) > 0.05:
# we want the sum of the likelihood to be right!
#import ipdb; ipdb.set_trace()
warnings.warn("Likelihood is not self-consistent.")
if legend:
ax.legend(loc='best', fontsize=legendfontsize)
ax.set_xlabel(xlabel)
ax.set_ylabel('$P(${0}$)$'.format(xlabel))
def parplot1d_all(self, legendfontsize=14, **kwargs):
fig = pl.gcf()
if not all(fig.get_size_inches() == [12,16]):
num = fig.number
pl.close(fig)
fig = pl.figure(num, figsize=(12,16))
for axindex,par in enumerate(('col','dens','tem')):
ax = fig.add_subplot(3,1,axindex+1)
self.parplot1d(par=par, clf=False, legend=False, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if axindex == 1:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fontsize=legendfontsize)
pl.subplots_adjust(hspace=0.45)
@property
def individual_likelihoods(self):
if hasattr(self, '_likelihoods') and self._likelihoods is not None:
return self._likelihoods
else:
self._likelihoods = {}
for key in self.__dict__:
                if 'chi2' in key and hasattr(getattr(self, key), 'size'):
self._likelihoods[key] = np.exp(-getattr(self,key)/2.)
self._likelihoods[key] /= self._likelihoods[key].sum()
return self._likelihoods
def cdf_of_like(like):
"""
There is probably an easier way to do this, BUT it works:
Turn a likelihood image into a CDF image
"""
like = like/like.sum()
order = np.argsort(like.flat)[::-1]
cdf = like.flat[order].cumsum()[np.argsort(order)].reshape(like.shape)
cdf[like == like.max()] = 0
return cdf
def ppf_of_like(like):
return 1-cdf_of_like(like)
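if __name__ == "__main__":
    # Minimal, self-contained demonstration of cdf_of_like / ppf_of_like on a
    # synthetic likelihood surface. This block is illustrative only and is not
    # part of the original module; it does not require the model grids.
    yy_demo, xx_demo = np.mgrid[0:50, 0:50]
    synthetic_like = np.exp(-((xx_demo - 25)**2 + (yy_demo - 25)**2) / (2 * 5.0**2))
    synthetic_cdf = cdf_of_like(synthetic_like)
    # Pixels with CDF < 0.68 form (approximately) the smallest region around
    # the peak that contains 68% of the probability mass.
    print("~1-sigma region covers {0} of {1} pixels"
          .format((synthetic_cdf < 0.68).sum(), synthetic_cdf.size))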
| bsd-3-clause |
procoder317/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
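# Optional check (not part of the original example): compare the recovered
# sparsity pattern with the ground truth. This makes the point above concrete:
# the l1 estimate finds the true structure but also flags spurious entries.
true_edges = np.abs(prec) > 1e-10
estimated_edges = np.abs(prec_) > 1e-10
n_true = np.sum(true_edges) - n_features
n_found = np.sum(true_edges & estimated_edges) - n_features
n_spurious = np.sum(~true_edges & estimated_edges)
print("GraphLasso recovered %d of %d true off-diagonal entries, "
      "plus %d spurious ones" % (n_found, n_true, n_spurious))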
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
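# Follow-up sketch (not part of the original example): as described above, a
# subset enriched in class B can be obtained by thresholding the decision
# score. The threshold of 0.2 is arbitrary and purely illustrative.
score_threshold = 0.2
selected = twoclass_output > score_threshold
print("Selected %d samples with score > %.1f; fraction of class B: %.2f"
      % (selected.sum(), score_threshold, np.mean(y[selected] == 1)))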
| bsd-3-clause |
oliverhuangchao/thunder | thunder/rdds/images.py | 4 | 29180 | from numpy import ndarray, arange, amax, amin, greater, size, asarray
from thunder.rdds.data import Data
from thunder.rdds.keys import Dimensions
class Images(Data):
"""
Distributed collection of images or volumes.
Backed by an RDD of key-value pairs, where the key
is an identifier and the value is a two or three-dimensional array.
"""
_metadata = Data._metadata + ['_dims']
def __init__(self, rdd, dims=None, nrecords=None, dtype=None):
super(Images, self).__init__(rdd, nrecords=nrecords, dtype=dtype)
if dims and not isinstance(dims, Dimensions):
try:
dims = Dimensions.fromTuple(dims)
except:
raise TypeError("Images dims parameter must be castable to Dimensions object, got: %s" % str(dims))
self._dims = dims
@property
def dims(self):
if self._dims is None:
self.populateParamsFromFirstRecord()
return self._dims
@property
def dtype(self):
# override just calls superclass; here for explicitness
return super(Images, self).dtype
@property
def _constructor(self):
return Images
def populateParamsFromFirstRecord(self):
record = super(Images, self).populateParamsFromFirstRecord()
self._dims = Dimensions.fromTuple(record[1].shape)
return record
@staticmethod
def _check_type(record):
if not isinstance(record[0], tuple):
raise Exception('Keys must be tuples')
if not isinstance(record[1], ndarray):
raise Exception('Values must be ndarrays')
def toBlocks(self, size="150M", units="pixels", padding=0):
"""
Convert to Blocks, each representing a subdivision of the larger Images data.
Parameters
----------
size : string memory size, tuple of splits per dimension, or instance of BlockingStrategy
String interpreted as memory size (e.g. "64M"). Tuple of ints interpreted as
"pixels per dimension" (default) or "splits per dimension", depending on units.
Instance of BlockingStrategy can be passed directly.
units : string, either "pixels" or "splits", default = "pixels"
What units to use for a tuple size.
padding : non-negative integer or tuple of int, optional, default = 0
Will generate blocks with extra `padding` voxels along each dimension.
Padded voxels will overlap with those in neighboring blocks, but will not be included
when converting blocks to Series or Images.
Returns
-------
Blocks instance
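        Examples
        --------
        Illustrative usage (not from the original docs); ``imgs`` is assumed
        to be an existing Images instance.
        >>> blocks = imgs.toBlocks("64M")
        >>> blocks = imgs.toBlocks((2, 2, 1), units="splits", padding=1)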
"""
from thunder.rdds.imgblocks.strategy import BlockingStrategy, SimpleBlockingStrategy, PaddedBlockingStrategy
stratClass = SimpleBlockingStrategy if not padding else PaddedBlockingStrategy
if isinstance(size, BlockingStrategy):
blockingStrategy = size
elif isinstance(size, basestring) or isinstance(size, int):
# make blocks close to the desired size
blockingStrategy = stratClass.generateFromBlockSize(self, size, padding=padding)
else:
# assume it is a tuple of positive int specifying splits
blockingStrategy = stratClass(size, units=units, padding=padding)
blockingStrategy.setSource(self)
avgSize = blockingStrategy.calcAverageBlockSize()
if avgSize >= BlockingStrategy.DEFAULT_MAX_BLOCK_SIZE:
# TODO: use logging module here rather than print
print "Thunder WARNING: average block size of %g bytes exceeds suggested max size of %g bytes" % \
(avgSize, BlockingStrategy.DEFAULT_MAX_BLOCK_SIZE)
returntype = blockingStrategy.getBlocksClass()
vals = self.rdd.flatMap(blockingStrategy.blockingFunction, preservesPartitioning=False)
# fastest changing dimension (e.g. x) is first, so must sort reversed keys to get desired ordering
# sort must come after group, b/c group will mess with ordering.
groupedvals = vals.groupBy(lambda (k, _): k.spatialKey).sortBy(lambda (k, _): tuple(k[::-1]))
        # groupedvals is now an rdd of (spatial key, [(partitioning key, numpy array), ...]) pairs,
        # where the spatial key is a (z, y, x) tuple
blockedvals = groupedvals.map(blockingStrategy.combiningFunction)
return returntype(blockedvals, dims=self.dims, nimages=self.nrecords, dtype=self.dtype)
def toTimeSeries(self, size="150M"):
"""
Converts this Images object to a TimeSeries object.
This method is equivalent to images.toBlocks(size).toSeries().toTimeSeries().
Parameters
----------
size: string memory size, optional, default = "150M"
String interpreted as memory size (e.g. "64M").
Returns
-------
new TimeSeries object
See also
--------
Images.toBlocks
"""
return self.toBlocks(size).toSeries().toTimeSeries()
def toSeries(self, size="150M"):
"""
Converts this Images object to a Series object.
This method is equivalent to images.toBlocks(size).toSeries().
Parameters
----------
size: string memory size, optional, default = "150M"
String interpreted as memory size (e.g. "64M").
Returns
-------
new Series object
See also
--------
Images.toBlocks
"""
return self.toBlocks(size).toSeries()
def saveAsBinarySeries(self, outputDirPath, blockSizeSpec="150M", units="pixels", overwrite=False):
"""
Writes this Images object to disk as binary Series data.
This method is equivalent to images.toBlocks(blockSizeSpec).saveAsBinarySeries(outputdirname, overwrite)
Parameters
----------
blockSizeSpec: string memory size, tuple of positive int, or instance of BlockingStrategy
A string spec will be interpreted as a memory size string (e.g. "64M"). The resulting Series data files will
be generated by a SimpleBlockingStrategy to be close to the requested size.
A tuple of positive ints will be interpreted as either "pixels per dimension" (default) or "splits per
dimension", depending on the value of the units parameter. The length of the tuple must match the
dimensionality of this Images object. These units will be passed into a SimpleBlockingStrategy which will
be used to control the size of the individual files written to disk.
If an instance of BlockingStrategy is passed, it will be used to generate the Series data files.
outputDirPath : string path or URI to directory to be created
Output files will be written underneath outputDirPath. This directory must not yet exist
(unless overwrite is True), and must be no more than one level beneath an existing directory.
It will be created as a result of this call.
units: string, either "pixels" or "splits" (or unique prefix of each, such as "s"), default "pixels"
Specifies units to be used in interpreting a tuple passed as blockSizeSpec. If a string or a
BlockingStrategy instance is passed as blockSizeSpec, this parameter has no effect.
overwrite : bool
If true, outputdirname and all its contents will be deleted and recreated as part
of this call.
Returns
-------
no return value
"""
if not overwrite:
self._checkOverwrite(outputDirPath)
overwrite = True # prevent additional downstream checks for this path
self.toBlocks(blockSizeSpec, units=units).saveAsBinarySeries(outputDirPath, overwrite=overwrite)
def saveAsPngs(self, outputDirPath, cmap=None, vmin=None, vmax=None, filePrefix="export", overwrite=False,
collectToDriver=True):
"""
Write out basic png files for two-dimensional image data.
Files will be written into a newly-created directory given by outputdirname.
Parameters
----------
outputDirPath : string
Path to output directory to be created. Exception will be thrown if this directory already
exists, unless overwrite is True. Directory must be one level below an existing directory.
filePrefix : string
String to prepend to all filenames. Files will be named <fileprefix>-00000.png, <fileprefix>-00001.png, etc
overwrite : bool
If true, the directory given by outputdirname will first be deleted if it already exists.
collectToDriver : bool, default True
If true, images will be collect()'ed at the driver first before being written out, allowing
for use of a local filesystem at the expense of network overhead. If false, images will be written
in parallel by each executor, presumably to a distributed or networked filesystem.
"""
dims = self.dims
if not len(dims) == 2:
raise ValueError("Only two-dimensional images can be exported as .png files; image is %d-dimensional." %
len(dims))
from matplotlib.pyplot import imsave
from io import BytesIO
from thunder.rdds.fileio.writers import getParallelWriterForPath, getCollectedFileWriterForPath
from thunder.utils.aws import AWSCredentials
def toFilenameAndPngBuf(kv):
key, img = kv
fname = filePrefix+"-"+"%05d.png" % int(key)
bytebuf = BytesIO()
imsave(bytebuf, img, vmin, vmax, cmap=cmap, format="png")
return fname, bytebuf.getvalue()
bufRdd = self.rdd.map(toFilenameAndPngBuf)
awsCredentials = AWSCredentials.fromContext(self.rdd.ctx)
if collectToDriver:
writer = getCollectedFileWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=awsCredentials)
writer.writeCollectedFiles(bufRdd.collect())
else:
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=awsCredentials)
bufRdd.foreach(writer.writerFcn)
def saveAsBinaryImages(self, outputDirPath, prefix="image", overwrite=False):
"""
Write out images or volumes as flat binary files.
Files will be written into a newly-created directory given by outputdirname.
Parameters
----------
outputDirPath : string
Path to output directory to be created. Exception will be thrown if this directory already
exists, unless overwrite is True. Directory must be one level below an existing directory.
prefix : string
String to prepend to all filenames. Files will be named <fileprefix>-00000.bin, <fileprefix>-00001.bin, etc
overwrite : bool
If true, the directory given by outputdirname will first be deleted if it already exists.
"""
from thunder.rdds.fileio.writers import getParallelWriterForPath
from thunder.rdds.fileio.imagesloader import writeBinaryImagesConfig
from thunder.utils.aws import AWSCredentials
dimsTotal = list(asarray(self.dims.max)-asarray(self.dims.min)+1)
def toFilenameAndBinaryBuf(kv):
key, img = kv
fname = prefix+"-"+"%05d.bin" % int(key)
return fname, img.transpose().copy()
bufRdd = self.rdd.map(toFilenameAndBinaryBuf)
awsCredentials = AWSCredentials.fromContext(self.rdd.ctx)
writer = getParallelWriterForPath(outputDirPath)(outputDirPath, overwrite=overwrite,
awsCredentialsOverride=awsCredentials)
bufRdd.foreach(writer.writerFcn)
writeBinaryImagesConfig(outputDirPath, dims=dimsTotal, dtype=self.dtype,
overwrite=overwrite, awsCredentialsOverride=awsCredentials)
def maxProjection(self, axis=2):
"""
Compute maximum projections of images / volumes
along the specified dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along
"""
if axis >= size(self.dims):
raise Exception("Axis for projection (%s) exceeds image dimensions (%s-%s)" % (axis, 0, size(self.dims)-1))
proj = self.rdd.mapValues(lambda x: amax(x, axis))
# update dimensions to remove axis of projection
newDims = list(self.dims)
del newDims[axis]
return self._constructor(proj, dims=newDims).__finalize__(self)
def maxminProjection(self, axis=2):
"""
Compute maximum-minimum projections of images / volumes
along the specified dimension. This computes the sum
of the maximum and minimum values along the given dimension.
Parameters
----------
axis : int, optional, default = 2
Which axis to compute projection along
"""
proj = self.rdd.mapValues(lambda x: amax(x, axis) + amin(x, axis))
# update dimensions to remove axis of projection
newDims = list(self.dims)
del newDims[axis]
return self._constructor(proj, dims=newDims).__finalize__(self)
def subsample(self, sampleFactor):
"""
Downsample an image volume by an integer factor
Parameters
----------
sampleFactor : positive int or tuple of positive ints
Stride to use in subsampling. If a single int is passed, each dimension of the image
will be downsampled by this same factor. If a tuple is passed, it must have the same
dimensionality of the image. The strides given in a passed tuple will be applied to
each image dimension.
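        Examples
        --------
        Illustrative usage; ``imgs`` is assumed to be an existing Images instance.
        >>> smaller = imgs.subsample(2)          # stride 2 in every dimension
        >>> smaller = imgs.subsample((2, 2, 1))  # for volumes, leave z untouched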
"""
dims = self.dims
ndims = len(dims)
if not hasattr(sampleFactor, "__len__"):
sampleFactor = [sampleFactor] * ndims
sampleFactor = [int(sf) for sf in sampleFactor]
if any((sf <= 0 for sf in sampleFactor)):
raise ValueError("All sampling factors must be positive; got " + str(sampleFactor))
def divRoundup(a, b):
# thanks stack overflow & Eli Collins:
# http://stackoverflow.com/questions/7181757/how-to-implement-division-with-round-towards-infinity-in-python
# this only works for positive ints, but we've checked for that above
return (a + b - 1) // b
sampleSlices = [slice(0, dims[i], sampleFactor[i]) for i in xrange(ndims)]
newDims = [divRoundup(dims[i], sampleFactor[i]) for i in xrange(ndims)]
return self._constructor(
self.rdd.mapValues(lambda v: v[sampleSlices]), dims=newDims).__finalize__(self)
def gaussianFilter(self, sigma=2, order=0):
"""
Spatially smooth images with a gaussian filter.
Filtering will be applied to every image in the collection and can be applied
        to either images or volumes. For volumes, if a single scalar sigma is passed,
it will be interpreted as the filter size in x and y, with no filtering in z.
parameters
----------
sigma : scalar or sequence of scalars, default=2
            Size of the filter, given as the standard deviation in pixels. A sequence is interpreted
as the standard deviation for each axis. For three-dimensional data, a single
scalar is interpreted as the standard deviation in x and y, with no filtering in z.
order : choice of 0 / 1 / 2 / 3 or sequence from same set, optional, default = 0
Order of the gaussian kernel, 0 is a gaussian, higher numbers correspond
to derivatives of a gaussian.
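        Examples
        --------
        Illustrative usage; ``imgs`` is assumed to be an existing Images instance.
        >>> smoothed = imgs.gaussianFilter(sigma=2)          # 2 px in x and y, none in z for volumes
        >>> smoothed = imgs.gaussianFilter(sigma=(2, 2, 1))  # explicit per-axis widths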
"""
from scipy.ndimage.filters import gaussian_filter
dims = self.dims
ndims = len(dims)
if ndims == 3 and size(sigma) == 1:
sigma = [sigma, sigma, 0]
return self._constructor(
self.rdd.mapValues(lambda v: gaussian_filter(v, sigma, order))).__finalize__(self)
def uniformFilter(self, size=2):
"""
Spatially filter images using a uniform filter.
Filtering will be applied to every image in the collection and can be applied
        to either images or volumes. For volumes, if a single scalar neighborhood is passed,
it will be interpreted as the filter size in x and y, with no filtering in z.
parameters
----------
size: int, optional, default=2
Size of the filter neighbourhood in pixels. A sequence is interpreted
as the neighborhood size for each axis. For three-dimensional data, a single
            scalar is interpreted as the neighborhood in x and y, with no filtering in z.
"""
return self._imageFilter(filter='uniform', size=size)
def medianFilter(self, size=2):
"""
Spatially filter images using a median filter.
Filtering will be applied to every image in the collection and can be applied
        to either images or volumes. For volumes, if a single scalar neighborhood is passed,
it will be interpreted as the filter size in x and y, with no filtering in z.
parameters
----------
size: int, optional, default=2
Size of the filter neighbourhood in pixels. A sequence is interpreted
as the neighborhood size for each axis. For three-dimensional data, a single
            scalar is interpreted as the neighborhood in x and y, with no filtering in z.
"""
return self._imageFilter(filter='median', size=size)
def _imageFilter(self, filter=None, size=2):
"""
Generic function for applying a filtering operation to images or volumes.
See also
--------
Images.uniformFilter
Images.medianFilter
"""
from numpy import isscalar
from scipy.ndimage.filters import median_filter, uniform_filter
FILTERS = {
'median': median_filter,
'uniform': uniform_filter
}
func = FILTERS[filter]
dims = self.dims
ndims = len(dims)
        if ndims == 3 and isscalar(size):
def filter_(im):
im.setflags(write=True)
for z in arange(0, dims[2]):
im[:, :, z] = func(im[:, :, z], size)
return im
else:
filter_ = lambda x: func(x, size)
return self._constructor(
self.rdd.mapValues(lambda v: filter_(v))).__finalize__(self)
def localCorr(self, neighborhood=2):
"""
Correlate every pixel to the average of its local neighborhood.
This algorithm computes, for every spatial record, the correlation coefficient
between that record's series, and the average series of all records within
a local neighborhood with a size defined by the neighborhood parameter.
The neighborhood is currently required to be a single integer, which represents the neighborhood
size in both x and y.
parameters
----------
neighborhood: int, optional, default=2
Size of the correlation neighborhood (in both the x and y directions), in pixels.
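        Examples
        --------
        Illustrative usage; ``imgs`` is assumed to be an existing Images instance.
        >>> corr = imgs.localCorr(neighborhood=4)  # averages over a 9x9 pixel window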
"""
if not isinstance(neighborhood, int):
raise ValueError("The neighborhood must be specified as an integer.")
from numpy import corrcoef
nimages = self.nrecords
# Spatially average the original image set over the specified neighborhood
blurred = self.uniformFilter((neighborhood * 2) + 1)
# Union the averaged images with the originals to create an Images object containing 2N images (where
# N is the original number of images), ordered such that the first N images are the averaged ones.
combined = self.rdd.union(blurred.applyKeys(lambda k: k + nimages).rdd)
combinedImages = self._constructor(combined, nrecords=(2 * nimages)).__finalize__(self)
# Correlate the first N (averaged) records with the last N (original) records
series = combinedImages.toSeries()
corr = series.applyValues(lambda x: corrcoef(x[:nimages], x[nimages:])[0, 1])
return corr.pack()
def crop(self, minbound, maxbound):
"""
Crop a spatial region from 2D or 3D data.
Parameters
----------
minbound : list or tuple
Minimum of crop region (x,y) or (x,y,z)
maxbound : list or tuple
Maximum of crop region (x,y) or (x,y,z)
Returns
-------
Images object with cropped images / volume
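        Examples
        --------
        Illustrative usage; ``imgs`` is assumed to hold 2D images.
        >>> cropped = imgs.crop((0, 0), (100, 100))  # keep the 100x100 top-left corner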
"""
dims = self.dims
ndims = len(dims)
dimsCount = dims.count
if ndims < 2 or ndims > 3:
raise Exception("Cropping only supported on 2D or 3D image data.")
dimMinMaxTuples = zip(dimsCount, minbound, maxbound)
if len(dimMinMaxTuples) != ndims:
raise ValueError("Number of specified bounds (%d) must equal image dimensionality (%d)" %
(len(dimMinMaxTuples), ndims))
slices = []
newdims = []
for dim, minb, maxb in dimMinMaxTuples:
if maxb > dim:
raise ValueError("Maximum bound (%d) may not exceed image size (%d)" % (maxb, dim))
if minb < 0:
raise ValueError("Minumum bound (%d) must be positive" % minb)
if minb < maxb:
slise = slice(minb, maxb)
newdims.append(maxb - minb)
elif minb == maxb:
slise = minb # just an integer index, not a slice; this squeezes out singleton dimensions
# don't append to newdims, this dimension will be squeezed out
else:
raise ValueError("Minimum bound (%d) must be <= max bound (%d)" % (minb, maxb))
slices.append(slise)
newrdd = self.rdd.mapValues(lambda v: v[slices])
newdims = tuple(newdims)
return self._constructor(newrdd, dims=newdims).__finalize__(self)
def meanByRegions(self, selection):
"""
Reduces images to one or more spatially averaged values using the given selection, which can be
        either a mask array or sequence of indices.
A passed mask must be a numpy ndarray of the same shape as the individual arrays in this
Images object. If the mask array is of integer or unsigned integer type, one mean value will
be calculated for each unique nonzero value in the passed mask. (That is, all pixels with a
value of '1' in the mask will be averaged together, as will all with a mask value of '2', and so
on.) For other mask array types, all nonzero values in the mask will be averaged together into
a single regional average.
        Alternatively, subscripted indices may be passed directly as a sequence of sequences of tuple indices. For
instance, selection=[[(0,1), (1,0)], [(2,1), (2,2)]] would return two means, one for the region made up
of the pixels at (0,1) and (1,0), and the other of (2,1) and (2,2).
The returned object will be a new 2d Images object with dimensions (1, number of regions). This can be
converted into a Series object and from there into time series arrays by calling
regionMeanImages.toSeries().collect().
Parameters
----------
        selection: ndarray mask with shape equal to self.dims.count, or sequence of sequences of pixel indices
Returns
-------
new Images object
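        Examples
        --------
        Illustrative usage with a hypothetical integer mask; ``imgs`` is assumed
        to hold 2D images at least 25 pixels in each dimension.
        >>> import numpy as np
        >>> mask = np.zeros(imgs.dims.count, dtype='uint8')
        >>> mask[5:10, 5:10] = 1    # region 1
        >>> mask[20:25, 5:10] = 2   # region 2
        >>> regionMeans = imgs.meanByRegions(mask)  # new Images with dims (1, 2)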
"""
from numpy import array, mean
ctx = self.rdd.context
def meanByIntMask(kv):
key, ary = kv
uniq = bcUnique.value
msk = bcSelection.value
meanVals = [mean(ary[msk == grp]) for grp in uniq if grp != 0]
return key, array(meanVals, dtype=ary.dtype).reshape((1, -1))
def meanByMaskIndices(kv):
key, ary = kv
maskIdxsSeq = bcSelection.value
means = array([mean(ary[maskIdxs]) for maskIdxs in maskIdxsSeq], dtype=ary.dtype).reshape((1, -1))
return key, means
# argument type checking
if isinstance(selection, ndarray):
# passed a numpy array mask
from numpy import unique
# getting image dimensions just requires a first() call, not too expensive; and we probably
# already have them anyway
if selection.shape != self.dims.count:
raise ValueError("Shape mismatch between mask '%s' and image dimensions '%s'; shapes must be equal" %
(str(selection.shape), str(self.dims.count)))
if selection.dtype.kind in ('i', 'u'):
# integer or unsigned int mask
selectFcn = meanByIntMask
uniq = unique(selection)
nregions = len(uniq) - 1 if 0 in uniq else len(uniq) # 0 doesn't turn into a region
bcUnique = ctx.broadcast(uniq)
bcSelection = ctx.broadcast(selection)
else:
selectFcn = meanByMaskIndices
nregions = 1
bcUnique = None
bcSelection = ctx.broadcast((selection.nonzero(), ))
else:
# expect sequence of sequences of subindices if we aren't passed a mask
selectFcn = meanByMaskIndices
regionSelections = []
imgNDims = len(self.dims.count)
for regionIdxs in selection:
# generate sequence of subindex arrays
# instead of sequence [(x0, y0, z0), (x1, y1, z1), ... (xN, yN, zN)], want:
# array([x0, x1, ... xN]), array([y0, y1, ... yN]), ... array([z0, z1, ... zN])
# this can be used directly in an array indexing expression: ary[regionSelect]
for idxTuple in regionIdxs:
if len(idxTuple) != imgNDims:
raise ValueError("Image is %d-dimensional, but got %d dimensional index: %s" %
(imgNDims, len(idxTuple), str(idxTuple)))
regionSelect = []
regionIdxs = asarray(regionIdxs).T.tolist()
for idxDimNum, dimIdxs in enumerate(zip(regionIdxs)):
imgDimMax = self.dims.count[idxDimNum]
dimIdxAry = array(dimIdxs, dtype='uint16')
idxMin, idxMax = dimIdxAry.min(), dimIdxAry.max()
if idxMin < 0 or idxMax >= imgDimMax:
raise ValueError("Index of dimension %d out of bounds; " % idxDimNum +
"got min/max %d/%d, all must be >=0 and <%d" %
(idxMin, idxMax, imgDimMax))
regionSelect.append(dimIdxAry)
regionSelections.append(regionSelect)
nregions = len(regionSelections)
bcUnique = None
bcSelection = ctx.broadcast(regionSelections)
data = self.rdd.map(selectFcn)
return self._constructor(data, dims=(1, nregions)).__finalize__(self)
def planes(self, startidz, stopidz):
"""
Subselect planes from 3D image data.
Parameters
----------
startidz, stopidz : int
Indices of region to crop in z, interpreted according to python slice indexing conventions.
See also
--------
Images.crop
"""
dims = self.dims
if len(dims) == 2 or dims[2] == 1:
raise Exception("Cannot subselect planes, images must be 3D")
return self.crop([0, 0, startidz], [dims[0], dims[1], stopidz])
def subtract(self, val):
"""
Subtract a constant value or an image / volume from
all images / volumes in the data set.
Parameters
----------
val : int, float, or ndarray
Value to subtract
"""
if size(val) != 1:
if val.shape != self.dims.count:
raise Exception('Cannot subtract image with dimensions %s '
'from images with dimension %s' % (str(val.shape), str(self.dims)))
return self.applyValues(lambda x: x - val)
def renumber(self):
"""
Recalculates keys for this Images object.
New keys will be a sequence of consecutive integers, starting at 0 and ending at self.nrecords-1.
"""
renumberedRdd = self.rdd.values().zipWithIndex().map(lambda (ary, idx): (idx, ary))
return self._constructor(renumberedRdd).__finalize__(self) | apache-2.0 |
emmdim/guifiAnalyzer | traffic/getZoneTraffic.py | 1 | 9212 | from guifiAnalyzer.db.traffic import TrafficDB
from guifiAnalyzer.db.traffic_assistant import TrafficAssistantDB
from guifiAnalyzer.db.infrastructure import InfraDB
from datetime import datetime
#from pprint import pprint
#from collections import Counter
import pandas
import numpy
import matplotlib
from matplotlib import pyplot
import os
import pdb
#root = 2444
#root = 2435
root = 8346
#root = 18668
core = False
corename = '_core' if core else ''
def getIfaceRadioMode(iface_id):
iface = infraDB.getInterface(iface_id)
radio = infraDB.getRadio(iface['parent'])
return radio['mode']
def getLinkRadioMode(link_id,index):
link = infraLinks[link_id]
iface = infraIfaces[link['interface'+index]]
radio = infraRadios[tuple(iface['parent'].values())]
return radio['mode']
def getDeviceLinkTrafficDF(link_id, device_id):
device_infra = trafficAssDevices[device_id]
#traffic_documents = trafficDB.getDeviceDocumentsAscending(device_id)
traffic_documents = [v for k,v in trafficDevices[device_id].iteritems()] if device_id in trafficDevices else None
traffic_in = []
traffic_out = []
dates = []
error = 'No'
if traffic_documents:
for doc in traffic_documents:
date = doc['_id'].split(':')[1]
snmp_key = device_infra['links'][link_id]
for i in range(0,12,1):
minutes = str(i*5) if i > 1 else '0'+str(i*5)
date_time = date+minutes
date_time = datetime.strptime(date_time, "%y%m%d%H%M")
dates.extend([date_time])
if snmp_key in doc['measurements'][i]['data']['traffic']:
traffic_in.extend([float(doc['measurements'][i]['data']['traffic'][snmp_key]['traffic_in'])])
traffic_out.extend([float(doc['measurements'][i]['data']['traffic'][snmp_key]['traffic_out'])])
else:
traffic_in.extend([float('NaN')])
traffic_out.extend([float('NaN')])
error = 'NTKID'
ts_in = pandas.Series(traffic_in, index=dates, name='trafficIn')
ts_out = pandas.Series(traffic_out, index=dates, name='trafficOut')
data_frame = pandas.concat([ts_in, ts_out], join='outer', axis=1)
else:
data_frame = None
error = 'NTD'
return (data_frame, error)
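# Sketch of the hourly traffic document layout the parser above assumes
# (field names taken from the code, values hypothetical):
#   {'_id': '<device_id>:<yymmddHH>',
#    'measurements': [{'data': {'traffic': {'<snmp_key>': {'traffic_in': '123.0',
#                                                          'traffic_out': '456.0'}}}},
#                     ...]}  # 12 entries, one per 5-minute slot
# Each hourly document therefore contributes twelve samples to the resulting
# DataFrame, whose columns are 'trafficIn' and 'trafficOut'.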
def getApClientLinkApDeviceId(link_id):
# Keep in mind that one of both will for sure be ap
link = trafficAssLinks[link_id]
#device_id = link['deviceA'] if getLinkRadioMode(link_id,'A') == 'ap' else link['deviceB']
device_id = link['deviceA'] if link['radioModeA'] == 'ap' else link['deviceB']
return device_id
def getApClientLinkClientDeviceId(link_id):
# Keep in mind that one of both will for sure be ap
link = trafficAssLinks[link_id]
device_id = link['deviceA'] if link['radioModeA'] == 'client' else link['deviceB']
return device_id
file_name = str(root)+corename+'_generic_df'
file_final_df = os.path.join( os.getcwd(), 'guifiAnalyzerOut', 'results', file_name)
trafficDevices = None
if not os.path.isfile(file_final_df):
print 'DOWNLOADING TRAFFIC DATA'
trafficDB = TrafficDB(root, core)
trafficDB.connect()
traffic_assDB = TrafficAssistantDB(root, core)
traffic_assDB.connect()
infraDB = InfraDB(root, core)
infraDB.connect()
infraLinks1 = list(infraDB.getLinks())
infraLinks = {l['_id']:l for l in infraLinks1}
infraIfaces1 = list(infraDB.getIfaces())
infraIfaces = {l['_id']:l for l in infraIfaces1}
infraRadios1 = list(infraDB.getRadios())
infraRadios = {tuple(l['_id'].values()):l for l in infraRadios1}
trafficAssDevices1 = list(traffic_assDB.getCollection('devices'))
trafficAssDevices = {l['_id']:l for l in trafficAssDevices1}
trafficAssLinks1 = list(traffic_assDB.getCollection('links'))
trafficAssLinks = {l['_id']:l for l in trafficAssLinks1}
trafficDevices1 = list(trafficDB.database.devices.find())
trafficDevicesIds1 =[l['_id'].split(':')[0] for l in trafficDevices1]
trafficDevicesIds = list(set(trafficDevicesIds1))
print len(trafficDevicesIds)
trafficDevices = {l:{} for l in trafficDevicesIds}
for d in trafficDevices1:
trafficDevices[d['_id'].split(':')[0]][d['_id'].split(':')[1]]= d
links = list(traffic_assDB.getCollection('links'))
links = [l for l in links if l['deviceA']!=None or l['deviceB']!=None]
linkIds = [l['_id'] for l in links]
apclientLinks = [l for l in links if infraLinks[l['_id']]['type'] == 'ap/client']
apclientLinkIds = [l['_id'] for l in apclientLinks]
#wdsLinks = list(set(links)-set(apclientLinks))
wdsLinks = [l for l in links if infraLinks[l['_id']]['type'] == 'wds']
#links = [traffic_assDB.getLink('54034')]
wdsDevices1 = [l['deviceA'] for l in wdsLinks]
wdsDevices2 = [l['deviceB'] for l in wdsLinks]
apDevices = [getApClientLinkApDeviceId(l['_id']) for l in apclientLinks ]
#apDevices = list(set(apDevices1))
print len(apDevices)
clientDevices = [getApClientLinkClientDeviceId(l['_id']) for l in apclientLinks ]
#clientDevices = list(set(clientDevices1))
print len(clientDevices)
devices = {'ap':apDevices,'client':clientDevices, 'wdsA':wdsDevices1, 'wdsB':wdsDevices2}
return_tupleA = lambda x: (x['_id'], x['deviceA'])
return_tupleB = lambda x: (x['_id'], x['deviceB'])
final_index = [f(l) for l in apclientLinks for f in (return_tupleA, return_tupleB)]
final_index1 = [f(l) for l in wdsLinks for f in (return_tupleA, return_tupleB)]
final_index.extend(final_index1)
final_index = pandas.MultiIndex.from_tuples(final_index, names=['Links','Devices'])
final_df = pandas.DataFrame(columns=['trafficIn', 'trafficOut', 'radioMode', 'error'], index=final_index)
pandas.options.display.mpl_style = 'default'
#matplotlib.rc('font', family='sans-serif')
if trafficDevices is None:
final_df = pandas.read_pickle(file_final_df)
else:
testedDevices = []
for k, v in devices.iteritems():
if k in ['wdsA', 'wdsB']:
typ = 'wds'
else:
typ = k
print k
for dev in v:
if (not dev) or (dev in testedDevices):
continue
if k in ['ap', 'client']:
link = apclientLinks[v.index(dev)]
else:
link = wdsLinks[v.index(dev)]
df, error = getDeviceLinkTrafficDF(link['_id'], dev)
if not isinstance(df, pandas.DataFrame):
testedDevices.extend([dev])
                # I could have an extra column to note where there
# were no data at all / not graphed
final_df.loc[link['_id'], dev] = [float('nan'), float('nan'), typ, error ]
continue
mean = df.astype('float64').mean(skipna=True, numeric_only=True)
#pdb.set_trace()
final_df.loc[link['_id'], dev] = [mean['trafficIn'], mean['trafficOut'], typ, error]
testedDevices.extend([dev])
final_df.to_pickle(file_final_df)
def get_df_statistics(df, name, stats):
print '/////////////////'
print 'Info %s' % name
#print df.info()
if name == 'TOTAL':
total = stats.loc['WDS', 'total'] + stats.loc['AP/CLIENT', 'total']
else:
total = len(df)
print 'Total Devices: %s' % (total)
stats.loc[name, 'total'] = total
usable = df[(df.trafficIn > 0) | (df.trafficOut > 0)].trafficIn.count()
print 'Total Usable Devices: %s' % (usable)
stats.loc[name, 'correct'] = usable
zeros = df[(df.trafficIn == 0) & (df.trafficOut == 0)].trafficIn.count()
print 'Total Devices with zero traffic: %s' % (zeros)
stats.loc[name, 'zeros'] = zeros
if name == 'TOTAL':
null = stats.loc['WDS', 'null'] + stats.loc['AP/CLIENT', 'null']
else:
null = df.trafficIn.isnull().sum()
print 'Total Null %s' % (null)
stats.loc[name, 'null'] = null
    otherrors = len(df) - usable - zeros - null
    print 'Other Errors %s' % otherrors
    stats.loc[name, 'error'] = otherrors
print 'Error :Devices without Traffic Data %s' % (df[(df.error == 'NTD')].trafficIn.count())
print 'Error: Device without Correct SNMP_key %s' % (df[(df.error == 'NTKID')].trafficIn.count())
print 'Average Traffic In: %s | Total Traffic In %s | Zeros %s | Null %s' % (df.trafficIn.mean(),
df.trafficIn.sum(), df[df.trafficIn == 0].trafficIn.count(), df.trafficIn.isnull().sum())
print 'Average Traffic Out: %s | Total Traffic Out %s | Zeros %s | Null %s' % (df.trafficOut.mean(),
df.trafficOut.sum(), df[df.trafficOut == 0].trafficOut.count(), df.trafficIn.isnull().sum())
stats_df = pandas.DataFrame(columns=['total', 'correct', 'zeros', 'null', 'error'],
index=['TOTAL', 'WDS', 'AP/CLIENT'])
print 'STATISTICS'
get_df_statistics(final_df[final_df.radioMode == 'wds'], 'WDS', stats_df)
get_df_statistics(final_df[(final_df.radioMode == 'ap') | (final_df.radioMode == 'client')],'AP/CLIENT', stats_df)
get_df_statistics(final_df, 'TOTAL', stats_df)
#get_df_statistics(final_df[final_df.radioMode == 'ap'], 'ap')
#get_df_statistics(final_df[final_df.radioMode == 'client'], 'client')
title = str(root) + ' core' if core else str(root)
stats_df[['correct', 'zeros', 'null','error']].plot(kind='bar', stacked=True, title=title)
#pyplot.savefig(os.path.join( os.getcwd(), 'guifiAnalyzerOut', 'results', file_name+'_stats.pdf'))
#for final_df in dfs:
# if isinstance(df, pandas.DataFrame):
# fig, ax = pyplot.subplots()
# df.plot(ax=ax)
#fig, axes = pyplot.subplots(nrows=2, ncols=1)
#for i, c in enumerate(data_frame.columns):
# data_frame[c].plot(
# ax=axes[i],
# figsize=(
# 12,
# 10),
# title=c + " 2444")
#df['proxiesPer100Nodes'].plot(figsize=(12, 10), title='proxiesPer100Nodes'+" "+g.zone.title)
#pyplot.show()
raw_input("End")
| gpl-3.0 |
Mistobaan/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 137 | 2035 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import test
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(
classifier, {'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
test.main()
| apache-2.0 |
jrderuiter/im-fusion | src/imfusion/model.py | 2 | 12866 | # -*- coding: utf-8 -*-
"""Module containing model classes for fusions and insertions."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import collections
import itertools
import operator
import numpy as np
import pandas as pd
import toolz
from imfusion.util.frozendict import frozendict
class FrameMixin(object):
@classmethod
def _get_columns(cls):
return cls._fields
@classmethod
def _to_record(cls, obj):
return obj._asdict()
@classmethod
def _to_obj(cls, record):
return cls(**record._asdict())
@classmethod
def read_csv(cls, file_path, **kwargs):
"""Reads objects from a csv file into a pandas DataFrame."""
df = pd.read_csv(str(file_path), **kwargs)
return cls.format_frame(df)
@classmethod
def to_csv(cls, file_path, objects, index=False, **kwargs):
"""Writes objects to a csv file."""
df = cls.to_frame(objects)
df.to_csv(str(file_path), index=index, **kwargs)
@classmethod
def from_csv(cls, file_path, **kwargs):
"""Reads objects from a csv file."""
df = cls.read_csv(file_path, **kwargs)
for obj in cls.from_frame(df):
yield obj
@classmethod
def from_frame(cls, df):
"""Converts dataframe into an interable of objects."""
for tup in df.itertuples():
yield cls._to_obj(tup)
@classmethod
def format_frame(cls, df):
"""Formats dataframe into a cohorent format."""
return cls._reorder_columns(df, order=cls._get_columns())
@classmethod
def to_frame(cls, objects):
"""Converts list of objects to a dataframe representation."""
# Check if insertions is empty.
is_empty, objects = cls._is_empty(objects)
if is_empty:
df = pd.DataFrame.from_records([], columns=cls._get_columns())
else:
rows = (cls._to_record(obj) for obj in objects)
df = pd.DataFrame.from_records(rows)
df = cls.format_frame(df)
return df
@staticmethod
def _is_empty(iterable):
try:
_, iterable = toolz.peek(iterable)
empty = False
except StopIteration:
empty = True
return empty, iterable
@classmethod
def _reorder_columns(cls, df, order):
extra_cols = set(df.columns) - set(order)
col_order = list(order) + sorted(extra_cols)
return df[col_order]
class MetadataFrameMixin(FrameMixin):
"""Mixin class adding namedtuple/frame conversion support."""
@classmethod
def _get_columns(cls):
fields = list(cls._fields)
del fields[fields.index('metadata')]
return fields
@classmethod
def _to_record(cls, obj):
obj_data = obj._asdict()
metadata = obj_data.pop('metadata')
return toolz.merge(metadata, obj_data)
@classmethod
def _to_obj(cls, record):
record_dict = record._asdict()
metadata_fields = [
k for k in record_dict.keys() if k not in set(cls._get_columns())
]
metadata = {k: record_dict.pop(k) for k in metadata_fields}
metadata.pop('Index', None)
return cls(metadata=frozendict(metadata), **record_dict)
def __getattr__(self, name):
if name in self.metadata:
return self.metadata[name]
else:
raise AttributeError
_Fusion = collections.namedtuple('Fusion', [
'seqname_a', 'location_a', 'strand_a', 'seqname_b', 'location_b',
'strand_b', 'flank_a', 'flank_b', 'support_junction', 'support_spanning'
])
class Fusion(FrameMixin, _Fusion):
"""Model Fusion class."""
__slots__ = ()
@property
def support(self):
"""Support score for the fusion."""
return self.support_junction + self.support_spanning
def normalize(self, seqname=None):
"""Normalizes fusions so that the side whose seqname has the
lowest lexical ordering is used as donor.
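        Examples
        --------
        Field values are illustrative only.
        >>> fusion = Fusion(seqname_a='2', location_a=300, strand_a=1,
        ...                 seqname_b='1', location_b=100, strand_b=-1,
        ...                 flank_a=10, flank_b=20,
        ...                 support_junction=5, support_spanning=2)
        >>> normed = fusion.normalize()
        >>> (normed.seqname_a, normed.seqname_b)
        ('1', '2')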
"""
if seqname is not None:
if self.seqname_a == seqname:
is_norm = True
else:
if self.seqname_b != seqname:
raise ValueError('Fusion does not include given seqname')
is_norm = False
elif self.seqname_a != self.seqname_b:
is_norm = self.seqname_a < self.seqname_b
else:
is_norm = self.location_a < self.location_b
if is_norm:
return self
else:
return self._replace(
seqname_a=self.seqname_b,
location_a=self.location_b,
strand_a=self.strand_b * -1,
seqname_b=self.seqname_a,
location_b=self.location_a,
strand_b=self.strand_a * -1,
flank_a=self.flank_b,
flank_b=self.flank_a)
def distance(self, other):
"""Determine distance to other fusion."""
if (self.seqname_a != other.seqname_a or
self.seqname_b != other.seqname_b):
raise ValueError('Fusions are on different reference sequences')
return (abs(self.location_a - other.location_a) +
abs(self.location_b - other.location_b))
@classmethod
def merge(cls, junctions, max_dist):
"""Merges fusion within genomic distance into a single fusion."""
for grp in cls._groupby_genomic_position(junctions, max_dist):
if len(grp) == 1:
yield grp[0]
else:
get_support = operator.attrgetter('support_junction')
sorted_grp = sorted(grp, key=get_support)
yield sorted_grp[-1]._replace(
support_junction=sum(f.support_junction
for f in sorted_grp),
support_spanning=sum(f.support_spanning
for f in sorted_grp),
flank_a=max(f.flank_a for f in sorted_grp),
flank_b=max(f.flank_b for f in sorted_grp))
@classmethod
def _groupby_genomic_position(cls, junctions, max_dist):
# Group junctions by strand and sequence.
def _keyfunc(fusion):
return (fusion.seqname_a, fusion.strand_a, fusion.seqname_b,
fusion.strand_b)
sorted_juncs = sorted(junctions, key=_keyfunc)
grouped_juncs = itertools.groupby(sorted_juncs, key=_keyfunc)
for _, grp in grouped_juncs:
for grp_a in cls._groupby_position(grp, 'a', max_dist):
for grp_b in cls._groupby_position(grp_a, 'b', max_dist):
yield grp_b
@staticmethod
def _groupby_position(junctions, side, max_dist):
get_loc = operator.attrgetter('location_' + side)
sorted_juncs = sorted(junctions, key=get_loc)
grp, prev_pos = [], np.nan
for junc in sorted_juncs:
if (get_loc(junc) - prev_pos) > max_dist:
yield grp
grp = [junc]
else:
grp.append(junc)
prev_pos = get_loc(junc)
if len(grp) > 0:
yield grp
_TransposonFusion = collections.namedtuple('TransposonFusion', [
'seqname', 'anchor_genome', 'anchor_transposon', 'strand_genome',
'strand_transposon', 'flank_genome', 'flank_transposon',
'support_junction', 'support_spanning', 'metadata'
])
class TransposonFusion(MetadataFrameMixin, _TransposonFusion):
"""Model class representing a gene-transposon fusion."""
__slots__ = ()
@property
def support(self):
"""Support score for the fusion."""
return self.support_junction + self.support_spanning
@property
def genome_region(self):
"""Returns the spanned region on the genome."""
if self.flank_genome > 0:
start = self.anchor_genome
end = self.anchor_genome + self.flank_genome
else:
start = self.anchor_genome + self.flank_genome
end = self.anchor_genome
return self.seqname, start, end
@property
def transposon_region(self):
"""Returns the spanned region on the transposon."""
if self.flank_transposon > 0:
start = self.anchor_transposon
end = self.anchor_transposon + self.flank_transposon
else:
start = self.anchor_transposon + self.flank_transposon
end = self.anchor_transposon
return start, end
@classmethod
def from_fusion(cls, fusion, transposon_name, metadata=None):
"""Converts the fusion to a transposon fusion object."""
if (fusion.seqname_a == transposon_name and
fusion.seqname_b == transposon_name):
raise ValueError('Fusion does not involve genomic sequence')
elif (fusion.seqname_a != transposon_name and
fusion.seqname_b != transposon_name):
raise ValueError('Fusion does not involve transposon')
elif fusion.seqname_a == transposon_name:
tr_key, gen_key = 'a', 'b'
tr_flank = fusion.flank_a * -fusion.strand_a
gen_flank = fusion.flank_b * fusion.strand_b
else:
tr_key, gen_key = 'b', 'a'
tr_flank = fusion.flank_b * fusion.strand_b
gen_flank = fusion.flank_a * -fusion.strand_a
return TransposonFusion(
seqname=getattr(fusion, 'seqname_' + gen_key),
anchor_genome=getattr(fusion, 'location_' + gen_key),
anchor_transposon=getattr(fusion, 'location_' + tr_key),
strand_genome=getattr(fusion, 'strand_' + gen_key),
strand_transposon=getattr(fusion, 'strand_' + tr_key),
flank_genome=gen_flank,
flank_transposon=tr_flank,
support_junction=fusion.support_junction,
support_spanning=fusion.support_spanning,
metadata=frozendict(metadata or {}))
_Insertion = collections.namedtuple('Insertion', [
'id', 'seqname', 'position', 'strand', 'support_junction',
'support_spanning', 'support', 'metadata'
])
class Insertion(MetadataFrameMixin, _Insertion):
"""Model class representing an insertion."""
__slots__ = ()
@classmethod
def from_transposon_fusion(cls, fusion, id_=None, drop_metadata=None):
"""Converts (annotated) transposon fusion to an insertion.
        Requires the fusion to have an annotated transposon feature
        ('feature_name' in its metadata).
        Parameters
        ----------
        fusion : TransposonFusion
            Annotated transposon fusion to convert.
        id_ : str
            ID to use for the insertion.
        drop_metadata : set of str or None
            Metadata keys that are dropped from the insertion metadata.
            Defaults to {'strand_genome', 'strand_transposon'}.
Returns
-------
Insertion
Insertion object derived from the transposon fusion.
"""
if drop_metadata is None:
drop_metadata = {'strand_genome', 'strand_transposon'}
# Fusion should have annotated transposon feature.
if 'feature_name' not in fusion.metadata:
raise ValueError(
'Fusion does not have an annotated transposon feature')
strand = fusion.strand_genome * fusion.strand_transposon
if 'gene_strand' in fusion.metadata:
gene_strand = fusion.metadata['gene_strand']
orientation = 'sense' if strand == gene_strand else 'antisense'
else:
orientation = None
ins_metadata = toolz.keyfilter(lambda k: k not in drop_metadata,
fusion.metadata)
ins_metadata['transposon_anchor'] = fusion.anchor_transposon
if orientation is not None:
ins_metadata['orientation'] = orientation
return Insertion(
id=id_,
seqname=fusion.seqname,
position=fusion.anchor_genome,
strand=strand,
support_junction=fusion.support_junction,
support_spanning=fusion.support_spanning,
support=fusion.support,
metadata=frozendict(ins_metadata))
@classmethod
def from_transposon_fusions(cls,
fusions,
id_fmt_str=None,
drop_metadata=None):
"""Converts annotated transposon fusions to insertions."""
if id_fmt_str is not None:
insertions = (cls.from_transposon_fusion(
fusion,
id_=id_fmt_str.format(i + 1),
drop_metadata=drop_metadata)
for i, fusion in enumerate(fusions))
else:
insertions = (cls.from_transposon_fusion(
fusion, drop_metadata=drop_metadata) for fusion in fusions)
for insertion in insertions:
yield insertion
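# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The coordinates
# and metadata below are hypothetical; the function only shows how the model
# classes above fit together.
def _example_fusion_to_insertion():  # pragma: no cover
    fusion = TransposonFusion(
        seqname='1', anchor_genome=182457001, anchor_transposon=1486,
        strand_genome=1, strand_transposon=-1,
        flank_genome=-52, flank_transposon=38,
        support_junction=10, support_spanning=4,
        metadata=frozendict({'feature_name': 'SA', 'gene_strand': 1}))
    # Derived properties summarise support and the spanned regions.
    assert fusion.support == 14
    assert fusion.genome_region == ('1', 182456949, 182457001)
    assert fusion.transposon_region == (1486, 1524)
    # Conversion requires 'feature_name' in the metadata; strand metadata is
    # dropped and the transposon anchor plus orientation are recorded.
    insertion = Insertion.from_transposon_fusion(fusion, id_='INS_1')
    assert insertion.position == 182457001
    assert insertion.strand == -1
    assert insertion.metadata['orientation'] == 'antisense'
    return insertion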
| mit |
giserh/SFrame | oss_src/unity/python/sframe/test/test_sarray_sketch.py | 5 | 11930 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
# -*- coding: utf-8 -*-
from ..data_structures.sarray import SArray
import pandas as pd
import numpy as np
import unittest
import random
import copy
import os
import math
import shutil
import array
import util
import time
import itertools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArraySketchTest(unittest.TestCase):
def setUp(self):
pass
def __validate_sketch_result(self, sketch, sa, delta = 1E-7):
df = pd.DataFrame(list(sa.dropna()))
pds = pd.Series(list(sa.dropna()))
if (sa.dtype() == int or sa.dtype() == float):
if (len(sa) == 0):
self.assertTrue(math.isnan(sketch.min()))
                self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0.0)
self.assertEquals(sketch.mean(), 0.0)
self.assertEquals(sketch.var(), 0.0)
self.assertEquals(sketch.std(), 0.0)
else:
self.assertEquals(sketch.min(), sa.min())
self.assertEquals(sketch.max(), sa.max())
self.assertEquals(sketch.sum(), sa.sum())
self.assertAlmostEqual(sketch.mean(), sa.dropna().mean(), delta=delta)
self.assertAlmostEqual(sketch.var(), sa.dropna().var(), delta=delta)
self.assertAlmostEqual(sketch.std(), sa.dropna().std(), delta=delta)
self.assertAlmostEqual(sketch.quantile(0.5), df.quantile(0.5)[0], delta=1)
self.assertEqual(sketch.quantile(0), df.quantile(0)[0])
self.assertEqual(sketch.quantile(1), df.quantile(1)[0])
self.assertEqual(sketch.frequent_items(), SArray(pds).sketch_summary().frequent_items())
for item in pds.value_counts().index:
self.assertEqual(sketch.frequency_count(item), pds.value_counts()[item])
self.assertAlmostEqual(sketch.num_unique(), len(sa.unique()), delta=3)
else:
with self.assertRaises(RuntimeError):
sketch.quantile((0.5))
self.assertEqual(sketch.num_undefined(), sa.num_missing())
self.assertEqual(sketch.size(), len(sa))
self.assertEqual(sketch.sketch_ready(), True)
self.assertEqual(sketch.num_elements_processed(), sketch.size())
def __validate_nested_sketch_result(self, sa):
sketch = sa.sketch_summary()
self.__validate_sketch_result(sketch, sa)
# element length summary
t = sketch.element_length_summary()
len_sa = sa.dropna().item_length()
self.__validate_sketch_result(t, len_sa)
def test_sketch_int(self):
int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_sketch_float(self):
int_data = [1.2, 3,.4, 6.789, None]
sa = SArray(data=int_data)
self.__validate_sketch_result(sa.sketch_summary(), sa)
def test_vector_sketch(self):
vector_data = [[], [1,2], [3], [4,5,6,7], [8,9,10], None]
sa = SArray(data=vector_data)
sketch = sa.sketch_summary();
self.__validate_sketch_result(sketch, sa)
self.__validate_sketch_result(sketch.element_length_summary(), sa.dropna().item_length())
flattened = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(sketch.element_summary(), SArray(flattened))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 5)
self.assertEqual((fi['[1 2]']), 1)
self.assertEqual((fi['[4 5 6 7]']), 1)
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys = 1).element_sub_sketch(1)
expected = sa.vector_slice(1)
self.__validate_sketch_result(s, expected)
# sub sketch with multiple keys
keys = [1,3]
s = sa.sketch_summary(sub_sketch_keys = keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.vector_slice(key)
self.__validate_sketch_result(s[key], expected)
indexes = range(0,10)
s = sa.sketch_summary(sub_sketch_keys = indexes).element_sub_sketch()
self.assertEqual(len(s), len(indexes))
def test_list_sketch(self):
list_data = [[], [1,2],[1,2], ['a', 'a', 'a', 'b'], [ 1 ,1 , 2], None]
sa = SArray(list_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary();
self.assertEqual(sketch.num_unique(), 4)
element_summary = sketch.element_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dropna())))
self.__validate_sketch_result(element_summary, SArray(another_rep, str))
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
self.assertEqual((fi['[1,2]']), 2)
self.assertEqual((fi['["a","a","a","b"]']), 1)
def test_dict_sketch_int_value(self):
dict_data = [{}, {'a':1, 'b':2}, {'a':1, 'b':2}, {'a':3, 'c':1}, {'a': 1, 'b': 2, 'c': 3}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
self.assertEqual(sketch.num_unique(), 4)
fi = sketch.frequent_items()
self.assertEqual(len(fi), 4)
self.assertEqual((fi['{"a":1, "b":2}']), 2)
self.assertEqual((fi['{"a":3, "c":1}']), 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_dict_sketch_str_value(self):
# Dict value sketch type should be auto inferred
dict_data = [{'a':'b', 'b':'c'}, {'a':'b', 'b':'c'}, {'a':'d', 'b':'4'}, None]
sa = SArray(data=dict_data)
self.__validate_nested_sketch_result(sa)
sketch = sa.sketch_summary()
fi = sketch.frequent_items()
self.assertEqual(len(fi), 2)
self.assertEqual(fi['{"a":"b", "b":"c"}'], 2)
self.assertEqual(fi['{"a":"d", "b":"4"}'], 1)
# Get dict key sketch
key_summary = sketch.dict_key_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_keys().dropna())))
self.__validate_sketch_result(key_summary, SArray(another_rep))
# Get dict value sketch
value_summary = sketch.dict_value_summary()
another_rep = list(itertools.chain.from_iterable(list(sa.dict_values().dropna())))
self.__validate_sketch_result(value_summary, SArray(another_rep))
# sub sketch with one key
s = sa.sketch_summary(sub_sketch_keys ='a').element_sub_sketch('a')
expected = sa.unpack(column_name_prefix="")['a']
self.__validate_sketch_result(s, expected)
s = sa.sketch_summary(sub_sketch_keys ='Nonexist').element_sub_sketch('Nonexist')
self.assertEqual(s.num_undefined(), len(sa))
# sub sketch with multiple keys
keys = ['a', 'b']
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch(keys)
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
# allow pass in empty keys, which will retrieve all keys
s = sa.sketch_summary(sub_sketch_keys =keys).element_sub_sketch()
self.assertEqual(len(s), len(keys))
for key in keys:
self.assertTrue(s.has_key(key))
expected = sa.unpack(column_name_prefix="")[key]
self.__validate_sketch_result(s[key], expected)
def test_dict_many_nones(self):
sa = SArray([None] * 200 + [{'a':'b'}])
self.assertEqual(sa.sketch_summary().num_elements_processed(), 201)
def test_str_sketch(self):
str_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", None]
sa = SArray(data=str_data)
sketch = sa.sketch_summary()
with self.assertRaises(RuntimeError):
sketch.min()
with self.assertRaises(RuntimeError):
sketch.max()
with self.assertRaises(RuntimeError):
sketch.sum()
with self.assertRaises(RuntimeError):
sketch.mean()
with self.assertRaises(RuntimeError):
sketch.var()
with self.assertRaises(RuntimeError):
sketch.std()
self.assertAlmostEqual(sketch.num_unique(), 10, delta=3)
self.assertEqual(sketch.num_undefined(), 1)
self.assertEqual(sketch.size(), len(str_data))
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
self.assertEqual(sketch.frequency_count("1"), 1)
self.assertEqual(sketch.frequency_count("2"), 1)
t = sketch.frequent_items()
self.assertEqual(len(t), 10)
def test_empty_sketch(self):
int_data = []
sa = SArray(data=int_data)
sketch = sa.sketch_summary()
self.assertTrue(math.isnan(sketch.min()))
self.assertTrue(math.isnan(sketch.max()))
self.assertEquals(sketch.sum(), 0)
self.assertEqual(sketch.mean(), 0)
self.assertEqual(sketch.var(), 0)
self.assertEqual(sketch.std(), 0)
self.assertEqual(sketch.num_unique(), 0)
self.assertEqual(sketch.num_undefined(),0)
self.assertEqual(sketch.size(), 0)
with self.assertRaises(RuntimeError):
sketch.quantile(0.5)
t = sketch.frequent_items()
self.assertEqual(len(t), 0)
def test_background_sketch(self):
dict_data = [{str(i):1} for i in range(1,10000)]
sa = SArray(dict_data)
s = sa.sketch_summary(background=True, sub_sketch_keys=[str(i ) for i in range(100,200)])
s.sketch_ready() # cannot check the actual value as it depends on the speed of processing
t = s.element_sub_sketch([str(i) for i in range(100, 105)])
self.assertEqual(len(t), 5)
def test_large_value_sketch(self):
sa = SArray([1234567890 for i in range(100)])
sk = sa.sketch_summary();
self.__validate_sketch_result(sa.sketch_summary(), sa, 1E-5)
def test_cancelation(self):
sa = SArray(range(1,10000))
s = sa.sketch_summary(background=True)
s.cancel()
# this can be rather non-deterministic, so there is very little
# real output validation that can be done...
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/utils/random.py | 37 | 10511 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
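# Illustrative usage sketch (not part of the original module): draw a sparse
# 6 x 2 label matrix. Column 0 samples from {0, 1, 2} with probabilities
# (0.25, 0.25, 0.5); column 1 is uniform over {0, 3}. Zeros are implicit
# (not stored) in the returned CSC matrix.
def _demo_random_choice_csc():  # pragma: no cover
    out = random_choice_csc(n_samples=6,
                            classes=[np.array([0, 1, 2]), np.array([0, 3])],
                            class_probability=[np.array([0.25, 0.25, 0.5]),
                                               np.array([0.5, 0.5])],
                            random_state=0)
    return out.toarray()  # dense (6, 2) array of sampled class labels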
| bsd-3-clause |
zhenv5/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidates` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
justacec/bokeh | examples/models/anscombe.py | 6 | 2964 | from __future__ import print_function
import numpy as np
import pandas as pd
from bokeh.util.browser import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Line
from bokeh.models import (
ColumnDataSource, Grid, GridPlot, LinearAxis, Plot, Range1d
)
from bokeh.resources import INLINE
raw_columns=[
[10.0, 8.04, 10.0, 9.14, 10.0, 7.46, 8.0, 6.58],
[8.0, 6.95, 8.0, 8.14, 8.0, 6.77, 8.0, 5.76],
[13.0, 7.58, 13.0, 8.74, 13.0, 12.74, 8.0, 7.71],
[9.0, 8.81, 9.0, 8.77, 9.0, 7.11, 8.0, 8.84],
[11.0, 8.33, 11.0, 9.26, 11.0, 7.81, 8.0, 8.47],
[14.0, 9.96, 14.0, 8.10, 14.0, 8.84, 8.0, 7.04],
[6.0, 7.24, 6.0, 6.13, 6.0, 6.08, 8.0, 5.25],
[4.0, 4.26, 4.0, 3.10, 4.0, 5.39, 19.0, 12.5],
[12.0, 10.84, 12.0, 9.13, 12.0, 8.15, 8.0, 5.56],
[7.0, 4.82, 7.0, 7.26, 7.0, 6.42, 8.0, 7.91],
[5.0, 5.68, 5.0, 4.74, 5.0, 5.73, 8.0, 6.89]]
quartet = pd.DataFrame(data=raw_columns, columns=
['Ix','Iy','IIx','IIy','IIIx','IIIy','IVx','IVy'])
circles_source = ColumnDataSource(
data = dict(
xi = quartet['Ix'],
yi = quartet['Iy'],
xii = quartet['IIx'],
yii = quartet['IIy'],
xiii = quartet['IIIx'],
yiii = quartet['IIIy'],
xiv = quartet['IVx'],
yiv = quartet['IVy'],
)
)
x = np.linspace(-0.5, 20.5, 10)
y = 3 + 0.5 * x
lines_source = ColumnDataSource(data=dict(x=x, y=y))
xdr = Range1d(start=-0.5, end=20.5)
ydr = Range1d(start=-0.5, end=20.5)
def make_plot(title, xname, yname):
plot = Plot(
x_range=xdr, y_range=ydr,
title=title, plot_width=400, plot_height=400,
border_fill_color='white', background_fill_color='#e9e0db'
)
xaxis = LinearAxis(axis_line_color=None)
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_line_color=None)
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
line = Line(x='x', y='y', line_color="#666699", line_width=2)
plot.add_glyph(lines_source, line)
circle = Circle(
x=xname, y=yname, size=12,
fill_color="#cc6633", line_color="#cc6633", fill_alpha=0.5
)
plot.add_glyph(circles_source, circle)
return plot
#where will this comment show up
I = make_plot('I', 'xi', 'yi')
II = make_plot('II', 'xii', 'yii')
III = make_plot('III', 'xiii', 'yiii')
IV = make_plot('IV', 'xiv', 'yiv')
grid = GridPlot(children=[[I, II], [III, IV]])
doc = Document()
doc.add_root(grid)
if __name__ == "__main__":
filename = "anscombe.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Anscombe's Quartet"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
LIDSOL-NPL/Pubmed_minning | Cosine_Similarity.py | 1 | 1163 | import sys
import glob
import argparse
import os
import operator
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from pprint import pprint
parser = argparse.ArgumentParser(description='Cosine classify some files')
parser.add_argument('archivos', type=argparse.FileType('r'), nargs='+', help='one or more files, or a glob like path_to/*txt')
args = parser.parse_args()
titulos=[]
Corpus=[]
for archivo in args.archivos:
titulos.append(os.path.basename(archivo.name))
Corpus.append(archivo.read())
documents=Corpus
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
dicts = {t:{} for t in titulos}
i = 0
for row in cosine_similarity(tfidf_matrix[:], tfidf_matrix):
tmp = {}
j = 0
for t in list(row):
tmp[ titulos[j]]=t
j += 1
x = tmp
sorted_x = sorted(x.items(), key=operator.itemgetter(1), reverse=True)
dicts[titulos[i]] = sorted_x
i += 1
pprint(dicts)
# names=[]
# for i in range(len(Corpus_titles)):
# names.append(Corpus_titles[i][len(path)+1:])
# print names
| gpl-3.0 |
juliandewit/kaggle_ndsb2017 | step2_train_mass_segmenter.py | 1 | 21174 | import settings
import helpers
import os
import glob
import random
import ntpath
import cv2
import numpy
from typing import List, Tuple
from keras.optimizers import Adam, SGD
from keras.layers import Input, Convolution2D, MaxPooling2D, UpSampling2D, merge, BatchNormalization, SpatialDropout2D
from keras.models import Model
from keras import backend as K
from keras.callbacks import ModelCheckpoint, Callback
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import pandas
import shutil
MEAN_FRAME_COUNT = 1
CHANNEL_COUNT = 1
def random_scale_img(img, xy_range, lock_xy=False):
if random.random() > xy_range.chance:
return img
if not isinstance(img, list):
img = [img]
import cv2
scale_x = random.uniform(xy_range.x_min, xy_range.x_max)
scale_y = random.uniform(xy_range.y_min, xy_range.y_max)
if lock_xy:
scale_y = scale_x
org_height, org_width = img[0].shape[:2]
xy_range.last_x = scale_x
xy_range.last_y = scale_y
res = []
for img_inst in img:
scaled_width = int(org_width * scale_x)
scaled_height = int(org_height * scale_y)
scaled_img = cv2.resize(img_inst, (scaled_width, scaled_height), interpolation=cv2.INTER_CUBIC)
if scaled_width < org_width:
            extend_left = (org_width - scaled_width) // 2
extend_right = org_width - extend_left - scaled_width
scaled_img = cv2.copyMakeBorder(scaled_img, 0, 0, extend_left, extend_right, borderType=cv2.BORDER_CONSTANT)
scaled_width = org_width
if scaled_height < org_height:
            extend_top = (org_height - scaled_height) // 2
extend_bottom = org_height - extend_top - scaled_height
scaled_img = cv2.copyMakeBorder(scaled_img, extend_top, extend_bottom, 0, 0, borderType=cv2.BORDER_CONSTANT)
scaled_height = org_height
        start_x = (scaled_width - org_width) // 2
        start_y = (scaled_height - org_height) // 2
tmp = scaled_img[start_y: start_y + org_height, start_x: start_x + org_width]
res.append(tmp)
return res
class XYRange:
def __init__(self, x_min, x_max, y_min, y_max, chance=1.0):
self.chance = chance
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.last_x = 0
self.last_y = 0
def get_last_xy_txt(self):
res = "x_" + str(int(self.last_x * 100)).replace("-", "m") + "-" + "y_" + str(int(self.last_y * 100)).replace("-", "m")
return res
def random_translate_img(img, xy_range, border_mode="constant"):
if random.random() > xy_range.chance:
return img
import cv2
if not isinstance(img, list):
img = [img]
org_height, org_width = img[0].shape[:2]
translate_x = random.randint(xy_range.x_min, xy_range.x_max)
translate_y = random.randint(xy_range.y_min, xy_range.y_max)
trans_matrix = numpy.float32([[1, 0, translate_x], [0, 1, translate_y]])
border_const = cv2.BORDER_CONSTANT
if border_mode == "reflect":
border_const = cv2.BORDER_REFLECT
res = []
for img_inst in img:
img_inst = cv2.warpAffine(img_inst, trans_matrix, (org_width, org_height), borderMode=border_const)
res.append(img_inst)
if len(res) == 1:
res = res[0]
xy_range.last_x = translate_x
xy_range.last_y = translate_y
return res
def random_rotate_img(img, chance, min_angle, max_angle):
import cv2
if random.random() > chance:
return img
if not isinstance(img, list):
img = [img]
angle = random.randint(min_angle, max_angle)
center = (img[0].shape[0] / 2, img[0].shape[1] / 2)
rot_matrix = cv2.getRotationMatrix2D(center, angle, scale=1.0)
res = []
for img_inst in img:
img_inst = cv2.warpAffine(img_inst, rot_matrix, dsize=img_inst.shape[:2], borderMode=cv2.BORDER_CONSTANT)
res.append(img_inst)
    if len(res) == 1:  # unwrap single-image results, mirroring random_translate_img
res = res[0]
return res
def random_flip_img(img, horizontal_chance=0, vertical_chance=0):
import cv2
flip_horizontal = False
if random.random() < horizontal_chance:
flip_horizontal = True
flip_vertical = False
if random.random() < vertical_chance:
flip_vertical = True
if not flip_horizontal and not flip_vertical:
return img
flip_val = 1
if flip_vertical:
flip_val = -1 if flip_horizontal else 0
if not isinstance(img, list):
res = cv2.flip(img, flip_val) # 0 = X axis, 1 = Y axis, -1 = both
else:
res = []
for img_item in img:
img_flip = cv2.flip(img_item, flip_val)
res.append(img_flip)
return res
ELASTIC_INDICES = None # needed to make it faster to fix elastic deformation per epoch.
def elastic_transform(image, alpha, sigma, random_state=None):
global ELASTIC_INDICES
shape = image.shape
    if ELASTIC_INDICES is None:
if random_state is None:
random_state = numpy.random.RandomState(1301)
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1]))
ELASTIC_INDICES = numpy.reshape(y + dy, (-1, 1)), numpy.reshape(x + dx, (-1, 1))
return map_coordinates(image, ELASTIC_INDICES, order=1).reshape(shape)
def prepare_image_for_net(img):
img = img.astype(numpy.float)
img /= 255.
if len(img.shape) == 3:
img = img.reshape(img.shape[-3], img.shape[-2], img.shape[-1])
else:
img = img.reshape(1, img.shape[-2], img.shape[-1], 1)
return img
def get_train_holdout_files(model_type, holdout, train_percentage=80, frame_count=8):
print("Get train/holdout files.")
file_paths = glob.glob("resources/segmenter_traindata/" + "*_1.png")
file_paths.sort()
train_res = []
holdout_res = []
for index, file_path in enumerate(file_paths):
file_name = ntpath.basename(file_path)
overlay_path = file_path.replace("_1.png", "_o.png")
train_set = False
if "1.3.6.1.4" in file_name or "spie" in file_name or "TIME" in file_name:
train_set = True
else:
patient_id = file_name.split("_")[0]
if helpers.get_patient_fold(patient_id) % 3 != holdout:
train_set = True
if train_set:
train_res.append((file_path, overlay_path))
else:
holdout_res.append((file_path, overlay_path))
print("Train count: ", len(train_res), ", holdout count: ", len(holdout_res))
return train_res, holdout_res
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + 100) / (K.sum(y_true_f) + K.sum(y_pred_f) + 100)
def dice_coef_np(y_true, y_pred):
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
intersection = numpy.sum(y_true_f * y_pred_f)
return (2. * intersection + 100) / (numpy.sum(y_true_f) + numpy.sum(y_pred_f) + 100)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
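# Worked example (illustrative, not part of the original script): the +100
# smoothing term above keeps the Dice score and its gradient well behaved for
# empty or tiny masks. Two identical 10-pixel masks score (2*10+100)/(10+10+100)
# = 1.0, while completely disjoint ones still score 100/110 ~= 0.91, so very
# small structures contribute only weakly to the loss.
def _dice_smoothing_example():  # pragma: no cover
    ones = numpy.ones(10)
    zeros = numpy.zeros(10)
    assert dice_coef_np(ones, ones) == 1.0
    assert abs(dice_coef_np(ones, zeros) - 100.0 / 110.0) < 1e-12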
class DumpPredictions(Callback):
def __init__(self, dump_filelist : List[Tuple[str, str]], model_type):
super(DumpPredictions, self).__init__()
self.dump_filelist = dump_filelist
self.batch_count = 0
if not os.path.exists("workdir/segmenter/"):
os.mkdir("workdir/segmenter/")
for file_path in glob.glob("workdir/segmenter/*.*"):
os.remove(file_path)
self.model_type = model_type
def on_epoch_end(self, epoch, logs=None):
model = self.model # type: Model
generator = image_generator(self.dump_filelist, 1, train_set=False, model_type=self.model_type)
for i in range(0, 10):
x, y = next(generator)
y_pred = model.predict(x, batch_size=1)
x = x.swapaxes(0, 3)
x = x[0]
# print(x.shape, y.shape, y_pred.shape)
x *= 255.
x = x.reshape((x.shape[0], x.shape[0])).astype(numpy.uint8)
y *= 255.
y = y.reshape((y.shape[1], y.shape[2])).astype(numpy.uint8)
y_pred *= 255.
y_pred = y_pred.reshape((y_pred.shape[1], y_pred.shape[2])).astype(numpy.uint8)
# cv2.imwrite("workdir/segmenter/img_{0:03d}_{1:02d}_i.png".format(epoch, i), x)
# cv2.imwrite("workdit/segmenter/img_{0:03d}_{1:02d}_o.png".format(epoch, i), y)
# cv2.imwrite("workdit/segmenter/img_{0:03d}_{1:02d}_p.png".format(epoch, i), y_pred)
def image_generator(batch_files, batch_size, train_set, model_type):
global ELASTIC_INDICES
while True:
if train_set:
random.shuffle(batch_files)
img_list = []
overlay_list = []
ELASTIC_INDICES = None
for batch_file_idx, batch_file in enumerate(batch_files):
images = []
img = cv2.imread(batch_file[0], cv2.IMREAD_GRAYSCALE)
images.append(img)
overlay = cv2.imread(batch_file[1], cv2.IMREAD_GRAYSCALE)
if train_set:
if random.randint(0, 100) > 50:
for img_index, img in enumerate(images):
images[img_index] = elastic_transform(img, 128, 15)
overlay = elastic_transform(overlay, 128, 15)
if True:
augmented = images + [overlay]
augmented = random_rotate_img(augmented, 0.8, -20, 20)
augmented = random_flip_img(augmented, 0.5, 0.5)
# processed = helpers_augmentation.random_flip_img(processed, horizontal_chance=0.5, vertical_chance=0)
# processed = helpers_augmentation.random_scale_img(processed, xy_range=helpers_augmentation.XYRange(x_min=0.8, x_max=1.2, y_min=0.8, y_max=1.2, chance=1.0))
augmented = random_translate_img(augmented, XYRange(-30, 30, -30, 30, 0.8))
images = augmented[:-1]
overlay = augmented[-1]
for index, img in enumerate(images):
# img = img[crop_y: crop_y + settings.TRAIN_IMG_HEIGHT3D, crop_x: crop_x + settings.TRAIN_IMG_WIDTH3D]
img = prepare_image_for_net(img)
images[index] = img
# helpers_augmentation.dump_augmented_image(img, mean_img=None, target_path="c:\\tmp\\" + batch_file[0])
# overlay = overlay[crop_y: crop_y + settings.TRAIN_IMG_HEIGHT3D, crop_x: crop_x + settings.TRAIN_IMG_WIDTH3D]
overlay = prepare_image_for_net(overlay)
# overlay = overlay.reshape(1, overlay.shape[-3] * overlay.shape[-2])
# overlay *= settings.OVERLAY_MULTIPLIER
images3d = numpy.vstack(images)
images3d = images3d.swapaxes(0, 3)
img_list.append(images3d)
overlay_list.append(overlay)
if len(img_list) >= batch_size:
x = numpy.vstack(img_list)
y = numpy.vstack(overlay_list)
# if len(img_list) >= batch_size:
yield x, y
img_list = []
overlay_list = []
def get_unet(learn_rate, load_weights_path=None) -> Model:
inputs = Input((settings.SEGMENTER_IMG_SIZE, settings.SEGMENTER_IMG_SIZE, CHANNEL_COUNT))
filter_size = 32
growth_step = 32
x = BatchNormalization()(inputs)
conv1 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(x)
conv1 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
pool1 = BatchNormalization()(pool1)
filter_size += growth_step
conv2 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
pool2 = BatchNormalization()(pool2)
filter_size += growth_step
conv3 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
pool3 = BatchNormalization()(pool3)
filter_size += growth_step
conv4 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
pool4 = BatchNormalization()(pool4)
conv5 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same', name="conv5b")(conv5)
pool5 = MaxPooling2D(pool_size=(2, 2), name="pool5")(conv5)
pool5 = BatchNormalization()(pool5)
conv6 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(pool5)
conv6 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same', name="conv6b")(conv6)
up6 = UpSampling2D(size=(2, 2), name="up6")(conv6)
up6 = merge([up6, conv5], mode='concat', concat_axis=3)
up6 = BatchNormalization()(up6)
# up6 = SpatialDropout2D(0.1)(up6)
filter_size -= growth_step
conv66 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(up6)
conv66 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv66)
up7 = merge([UpSampling2D(size=(2, 2))(conv66), conv4], mode='concat', concat_axis=3)
up7 = BatchNormalization()(up7)
# up7 = SpatialDropout2D(0.1)(up7)
filter_size -= growth_step
conv7 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv3], mode='concat', concat_axis=3)
up8 = BatchNormalization()(up8)
filter_size -= growth_step
conv8 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv2], mode='concat', concat_axis=3)
up9 = BatchNormalization()(up9)
conv9 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(filter_size, 3, 3, activation='relu', border_mode='same')(conv9)
# conv9 = BatchNormalization()(conv9)
up10 = UpSampling2D(size=(2, 2))(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(up10)
model = Model(input=inputs, output=conv10)
# model.load_weights(load_weights_path)
# model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
model.compile(optimizer=SGD(lr=learn_rate, momentum=0.9, nesterov=True), loss=dice_coef_loss, metrics=[dice_coef])
model.summary()
return model
def train_model(holdout, model_type, continue_from=None):
batch_size = 4
train_percentage = 80 if model_type == "masses" else 90
train_files, holdout_files = get_train_holdout_files( model_type, holdout, train_percentage, frame_count=CHANNEL_COUNT)
# train_files = train_files[:100]
# holdout_files = train_files[:10]
tmp_gen = image_generator(train_files[:2], 2, True, model_type)
for i in range(10):
x = next(tmp_gen)
img = x[0][0].reshape((settings.SEGMENTER_IMG_SIZE, settings.SEGMENTER_IMG_SIZE))
img *= 255
# cv2.imwrite("c:/tmp/img_" + str(i).rjust(3, '0') + "i.png", img)
img = x[1][0].reshape((settings.SEGMENTER_IMG_SIZE, settings.SEGMENTER_IMG_SIZE))
img *= 255
# cv2.imwrite("c:/tmp/img_" + str(i).rjust(3, '0') + "o.png", img)
# print(x.shape)
train_gen = image_generator(train_files, batch_size, True, model_type)
holdout_gen = image_generator(holdout_files, batch_size, False, model_type)
if continue_from is None:
model = get_unet(0.001)
else:
model = get_unet(0.0001)
model.load_weights(continue_from)
checkpoint1 = ModelCheckpoint("workdir/" + model_type +"_model_h" + str(holdout) + "_{epoch:02d}-{val_loss:.2f}.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
checkpoint2 = ModelCheckpoint("workdir/" + model_type +"_model_h" + str(holdout) + "_best.hd5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
files = []
idx = 0
while (idx < (len(holdout_files))):
files.append(holdout_files[idx])
idx += 5
dumper = DumpPredictions(holdout_files[::10], model_type)
epoch_div = 1
epoch_count = 200 if model_type == "masses" else 50
model.fit_generator(train_gen, len(train_files) / epoch_div, epoch_count, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / epoch_div, callbacks=[checkpoint1, checkpoint2, dumper])
if not os.path.exists("models"):
os.mkdir("models")
shutil.copy("workdir/" + model_type +"_model_h" + str(holdout) + "_best.hd5", "models/" + model_type +"_model_h" + str(holdout) + "_best.hd5")
def predict_patients(patients_dir, model_path, holdout, patient_predictions, model_type):
model = get_unet(0.001)
model.load_weights(model_path)
for item_name in os.listdir(patients_dir):
if not os.path.isdir(patients_dir + item_name):
continue
patient_id = item_name
if holdout >= 0:
patient_fold = helpers.get_patient_fold(patient_id, submission_set_neg=True)
if patient_fold < 0:
if holdout != 0:
continue
else:
patient_fold %= 3
if patient_fold != holdout:
continue
# if "100953483028192176989979435275" not in patient_id:
# continue
print(patient_id)
patient_dir = patients_dir + patient_id + "/"
mass = 0
img_type = "_i" if model_type == "masses" else "_c"
slices = glob.glob(patient_dir + "*" + img_type + ".png")
if model_type == "emphysema":
slices = slices[int(len(slices) / 2):]
for img_path in slices:
src_img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
src_img = cv2.resize(src_img, dsize=(settings.SEGMENTER_IMG_SIZE, settings.SEGMENTER_IMG_SIZE))
src_img = prepare_image_for_net(src_img)
p = model.predict(src_img, batch_size=1)
p[p < 0.5] = 0
mass += p.sum()
p = p[0, :, :, 0] * 255
# cv2.imwrite(img_path.replace("_i.png", "_mass.png"), p)
src_img = src_img.reshape((settings.SEGMENTER_IMG_SIZE, settings.SEGMENTER_IMG_SIZE))
src_img *= 255
# src_img = cv2.cvtColor(src_img.astype(numpy.uint8), cv2.COLOR_GRAY2BGR)
# p = cv2.cvtColor(p.astype(numpy.uint8), cv2.COLOR_GRAY2BGRA)
src_img = cv2.addWeighted(p.astype(numpy.uint8), 0.2, src_img.astype(numpy.uint8), 1 - 0.2, 0)
cv2.imwrite(img_path.replace(img_type + ".png", "_" + model_type + "o.png"), src_img)
if mass > 1:
print(model_type + ": ", mass)
patient_predictions.append((patient_id, mass))
df = pandas.DataFrame(patient_predictions, columns=["patient_id", "prediction"])
df.to_csv(settings.BASE_DIR + model_type + "_predictions.csv", index=False)
if __name__ == "__main__":
continue_from = None
if True:
for model_type_name in ["masses"]:
train_model(holdout=0, model_type=model_type_name, continue_from=continue_from)
train_model(holdout=1, model_type=model_type_name, continue_from=continue_from)
train_model(holdout=2, model_type=model_type_name, continue_from=continue_from)
if True:
for model_type_name in ["masses"]:
patient_predictions_global = []
for holdout_no in [0, 1, 2]:
patient_base_dir = settings.NDSB3_EXTRACTED_IMAGE_DIR
predict_patients(patients_dir=patient_base_dir, model_path="models/" + model_type_name + "_model_h" + str(holdout_no) + "_best.hd5", holdout=holdout_no, patient_predictions=patient_predictions_global, model_type=model_type_name)
| mit |
tranpthuan/blabla | Cybercore/task1_cybercore.py | 1 | 1320 | import numpy as np
import matplotlib.pyplot as plt
import csv
read1 = []
read2 = []
with open('train.csv',"rb") as csvfile:
read = csv.reader(csvfile)
read.next()
for row in read :
        if len(row) <= 1:  # data preprocessing: skip empty or malformed rows
continue
read1.append(row[0])
read2.append(row[1])
X = np.array([read1], dtype = float).T
Y = np.array([read2], dtype = float).T
#Xbar for the mean value
one = np.ones((X.shape[0], 1))
Xbar = np.concatenate((one, X), axis = 1)
#processing lines
A = np.dot(Xbar.T, Xbar)
b = np.dot(Xbar.T,Y)
w = np.dot(np.linalg.pinv(A),b)
w0 = w[0][0]
w1 = w[1][0]
print(w0)
print(w1)
x0 = np.linspace(0, 110, 2)
y0 = w0 + w1*x0
plt.plot(X, Y, 'm.') # data
plt.plot(x0, y0, 'c') # the fitting line
plt.axis([0, 110, 0, 110])
plt.xlabel('X')
plt.ylabel('')
plt.show()
temp = []
data = []
with open('test.csv',"rb") as csvtest :
test = csv.reader(csvtest)
test.next()
for i in test:
if(len(i) < 1) :
continue
temp.append(i[0]);
data = np.array(temp, dtype = float)
with open('predict.csv',"wb") as output :
writer = csv.writer(output)
writer.writerow(['x','y'])
for j in data :
y1 = j*w1 + w0
writer.writerow([j, y1])
csvtest.close()
csvfile.close()
output.close() | gpl-3.0 |
Chandra-MARX/marxs | marxs/visualization/threejsjson.py | 1 | 8747 | # Licensed under GPL version 3 - see LICENSE.rst
'''Plot routines for json output to be loaded into `three.js <threejs.org>`__.
Each routine returns a json dictionary. These json dictionaries can be collected in
a list and written to a file with `~marxs.visualization.threesjson.write`, which
checks the formatting and adds metadata.
Just as `~marxs.visualization.threejs` this backend provides input for a webpage that
will then use the `three.js <threejs.org>`_ library to render the 3D model. The main
difference is that `~marxs.visualization.threejs` outputs plain text for each object,
while the json written by this module is much smaller if the model contains many copies
of the same object (e.g. hundreds of diffraction gratings). Also, the json data can be
updated with no changes necessary to the html page.
MARXS includes ``loader.html`` as an example how ``MARXSloader.js`` (included in MARXS
as well) can be used to load the json file and construct the three.js objects.
Note that routine to display a torus is modified
relative to the official three.js release to allow more parameters, the modified
version is included in MARXS.
For reference, here is a short summary of the json data layout.
There are two main entries: ``meta`` (a dictionary with the MARXS version, writer,
date, etc.) and ``elements``, which is a list of dictionaries.
Each of these dictionaries has the following fields:
- ``n``: number of objects in list
- ``name``: string or list (if there are several objects in this entry)
- ``material``: string
- ``materialproperties``: dict
- ``geometry``: type (e.g. ``BufferGeometry``, see `three.js <threejs.org>`_)
- if ``geometry`` is a buffer Geometry: ``pos``, ``color`` (lists of numbers)
- otherwise:
- ``pos4d``: list of lists of 16 numbers
- ``geometrypars``: list (meaning depends on geometry, e.g. radius of torus)
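For illustration, a minimal file as produced by
`~marxs.visualization.threejsjson.write` below might look like this
(values abbreviated, element name chosen arbitrarily)::
    {"meta": {"format_version": 1, "origin": "MARXS:threejsjson output",
              "date": "...", "time": "...", "marxs_version": "..."},
     "elements": [{"n": 1, "name": "aperture", "material": "MeshStandardMaterial",
                   "materialproperties": {"...": "..."}, "geometry": "BufferGeometry",
                   "geometrytype": "Mesh", "pos": [["..."]], "faces": [["..."]]}]}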
'''
import os
import json
import datetime
import warnings
import numpy as np
from astropy.utils.decorators import format_doc
from ..version import version
from . import threejs
from . import utils
try:
import jsonschema
HAS_JSONSCHEMA = True
except ImportError:
HAS_JSONSCHEMA = False
doc_plot='''
{__doc__}
Parameters
----------
obj : `marxs.base.MarxsElement`
The element that should be plotted.
display : dict or None
Dictionary with display settings.
Returns
--------
outjson : dict
    ``outjson`` is a (possibly nested) dictionary that describes the scene
    in a form that the included MARXSloader.js can read.
'''
@format_doc(doc_plot)
def plot_object(obj, display=None, **kwargs):
    '''Format any MARXS object as a json string.
This method will inspect the object that is passed in and select the
correct plotting method for its shape.
'''
return utils.plot_object_general(plot_registry, obj, display=display, **kwargs)
def container(obj, display=None, **kwargs):
    '''Recursively output three.js json to describe all elements of a container.
Output of each element can be a dict (if it is a leaf) or a list
(if it is a container). We need to flatten the list here to avoid
arbitrarily deep recursion.
'''
out = []
for elem in obj.elements:
        elemout = plot_object(elem, elem.display)
if isinstance(elemout, list):
out.extend(elemout)
elif (elemout is None):
pass
else:
out.append(elemout)
return out
def box(obj, display):
'''Describe a box-shaped optical elements.'''
out = {}
out['n'] = 1
out['name'] = str(obj.name)
out['material'] = 'MeshStandardMaterial'
out['materialproperties'] = threejs.materialdict(display, out['material'])
out['geometry'] = 'BoxGeometry'
out['geometrypars'] = (2, 2, 2)
out['pos4d'] = [obj.pos4d.T.flatten().tolist()]
if not ('side' in display):
out['materialproperties']['side'] = 2
return out
def triangulation(obj, display):
'''Describe a plane with a hole, such as an aperture of baffle.'''
xyz, triangles = obj.geometry.triangulate(display)
out = {}
out['n'] = 1
out['name'] = str(obj.name)
out['material'] = 'MeshStandardMaterial'
out['materialproperties'] = threejs.materialdict(display, out['material'])
out['geometry'] = 'BufferGeometry'
out['geometrytype'] = 'Mesh'
out['pos'] = [xyz.flatten().tolist()]
out['faces'] = [triangles.flatten().tolist()]
if not ('side' in display):
out['materialproperties']['side'] = 2
return out
def torus(obj, display, theta0=0., thetaarc=2*np.pi, phi0=0., phiarc=np.pi * 2):
'''Describe a (possibly incomplete) torus.
The plot range for theta and phi is taken from the values for ``coo1`` and ``coo2``
in the ``display`` dictionary. There entries should be a list of value if less then
the full torus (ranging from 0 to 2 pi in each coordinate) is desired for the plot.
'''
theta = display.get('coo1', [0, 2 * np.pi])
phi = display.get('coo2', [0, 2 * np.pi])
out = {}
out['n'] = 1
out['name'] = str(obj.name)
out['material'] = 'MeshStandardMaterial'
out['materialproperties'] = threejs.materialdict(display, out['material'])
out['geometry'] = 'ModifiedTorusBufferGeometry'
out['geometrypars'] = (obj.R, obj.r,
int(np.rad2deg(theta[1])), int(np.rad2deg(phi[1])),
theta[1], theta[0], phi[1], phi[0])
out['pos4d'] = [obj.pos4d.flatten().tolist()]
if not ('side' in display):
out['materialproperties']['side'] = 2
return out
def plot_rays(data, scalar=None, prop={}, name='Photon list', cmap=None):
'''Plot lines for simulated rays.
Parameters
----------
data : np.array of shape(n, N, 3)
where n is the number of rays, N the number of positions per ray and
the last dimension is the (x,y,z) of an Eukledian position vector.
scalar : None or nd.array of shape (n,) or (n, N)
This quantity is used to color the rays. If ``None`` all rays will have the same
color. If it has n elements, each ray will have exactly one color (e.g. color
according to the energy of the ray), if it has n*N elements, rays will be
multicolored.
prop : dict
keyword arguments for line material.
name : string
Identifier "name" for three.js objects. This only matters if your website
identifies elements by name for interactive features.
cmap : `matplotlib.colors.Colormap` instance or string or None
`matplotlib` color maps are used to convert ``scalar`` values to rgb colors.
If ``None`` the default matplotlib colormap will be used, otherwise the colormap
can be specified in this keyword.
'''
data, s_rgb, prob, n = threejs._format_plot_rays_input(data, scalar, cmap, prop)
out = {}
out['n'] = n
out['name'] = name
out['material'] = 'LineBasicMaterial'
out['materialproperties'] = threejs.materialdict(prop, out['material'])
out['geometry'] = 'BufferGeometry'
out['geometrytype'] = 'Line'
out['pos'] = data.reshape((n, -1)).tolist()
out['color'] = s_rgb[:, :, :3].reshape((n, -1)).tolist()
return out
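# Illustrative sketch (added annotation): calling ``plot_rays`` with a small
# synthetic ray bundle.  Shapes follow the docstring above; the energies used
# for coloring are made up.
#
#     n_rays, n_pos = 5, 2
#     pos = np.zeros((n_rays, n_pos, 3))
#     pos[:, 1, 0] = 1.                           # each ray travels one unit in x
#     pos[:, :, 1] = np.arange(n_rays)[:, None]   # fan the rays out in y
#     energy = np.linspace(0.3, 3., n_rays)       # one value per ray -> one color per ray
#     rays_json = plot_rays(pos, scalar=energy, name='example rays')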
def write(fileobject, data, photons=None):
'''Add metadata and write json for three.js to disk
Parameters
----------
fileobject : writeable file-like object
data : list of dict or single dict
Output of ``xxx.plot(format='threejsjson')`` calls. This can either
be a list of dictionaries or a single dictionary.
photons : `astropy.table.Table` or None
Some metadata is copied from a photon list, if available.
'''
if not isinstance(data, list):
data = [data]
date = datetime.datetime.now()
jdata = {'meta': {'format_version': 1,
'origin': 'MARXS:threejsjson output',
'date': str(date.date()),
'time': str(date.time()),
'marxs_version': version},
'elements': data}
if photons is not None:
        jdata['runinfo'] = photons.meta
if HAS_JSONSCHEMA:
path = os.path.abspath(os.path.dirname(__file__))
schemafile = os.path.join(path, 'threejs_files', 'jsonschema.json')
with open(schemafile) as f:
schema = json.load(f)
jsonschema.validate(jdata, schema)
else:
warnings.warn('Module jsonschema not installed. json file will be written without further verification.')
json.dump(jdata, fileobject)
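# Illustrative sketch (added annotation): collecting element output and ray
# output and writing the scene to disk.  ``elements`` and the file name are
# placeholders; no photon list is passed, so no run metadata is copied.
#
#     scene = elements + [rays_json]
#     with open('scene.json', 'w') as f:
#         write(f, scene)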
plot_registry = {'triangulation': triangulation,
'torus': torus,
'box': box,
'container': container,
}
| gpl-3.0 |
luo66/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/io/json/json.py | 2 | 30286 | # pylint: disable-msg=E1101,W0613,W0603
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslib import iNaT
from pandas.compat import StringIO, long, u
from pandas import compat, isna
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import (get_filepath_or_buffer, _get_handle,
_infer_compression, _stringify_path,
BaseIterator)
from pandas.io.parsers import _validate_integer
from pandas.core.common import AbstractMethodError
from pandas.core.reshape.concat import concat
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema
from pandas.core.dtypes.common import is_period_dtype
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
path_or_buf = _stringify_path(path_or_buf)
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if orient == 'table' and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or 'values')
if orient == 'table' and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
fh, handles = _get_handle(path_or_buf, 'w', compression=compression)
try:
fh.write(s)
finally:
fh.close()
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler
)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'{orient}'".format(orient=self.orient))
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
"""
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do this in the caller, because the schema inference
        needs to know what the index is), forces orient to 'records', and
        forces date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
date_unit, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='{fmt}'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`"
.format(fmt=date_format))
raise ValueError(msg)
self.schema = build_table_schema(obj)
        # NotImplemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError(
"orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
len(obj.columns & obj.index.names)):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=['timedelta']).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(
lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
self.obj = obj.reset_index()
self.date_format = 'iso'
self.orient = 'records'
def write(self):
data = super(JSONTableWriter, self).write()
serialized = '{{"schema": {schema}, "data": {data}}}'.format(
schema=dumps(self.schema), data=data)
return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
        List of columns to parse for dates; if True, then try to parse
        datelike columns (the default). A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
chunksize: integer, default None
Return JsonReader object for iteration.
        See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression,
)
json_reader = JsonReader(
filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit, encoding=encoding,
lines=lines, chunksize=chunksize, compression=compression,
)
if chunksize:
return json_reader
return json_reader.read()
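# Illustrative sketch (added annotation, not part of pandas): reading a
# line-delimited JSON file in chunks.  The file name and the 'value' column
# are placeholders.
#
#     reader = read_json('records.jsonl', lines=True, chunksize=1000)
#     filtered = [chunk[chunk['value'] > 0] for chunk in reader]
#     df = concat(filtered)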
class JsonReader(BaseIterator):
"""
JsonReader provides an interface for reading in a JSON file.
If initialized with ``lines=True`` and ``chunksize``, can be iterated over
``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
whole document.
"""
def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes,
convert_dates, keep_default_dates, numpy, precise_float,
date_unit, encoding, lines, chunksize, compression):
self.path_or_buf = filepath_or_buffer
self.orient = orient
self.typ = typ
self.dtype = dtype
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.keep_default_dates = keep_default_dates
self.numpy = numpy
self.precise_float = precise_float
self.date_unit = date_unit
self.encoding = encoding
self.compression = compression
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
self.should_close = False
if self.chunksize is not None:
self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
if not self.lines:
raise ValueError("chunksize can only be passed if lines=True")
data = self._get_data_from_filepath(filepath_or_buffer)
self.data = self._preprocess_data(data)
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data
def _get_data_from_filepath(self, filepath_or_buffer):
"""
read_json accepts three input types:
1. filepath (string-like)
2. file-like object (e.g. open file object, StringIO)
3. JSON string
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
data = filepath_or_buffer
exists = False
if isinstance(data, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
if exists or self.compression is not None:
data, _ = _get_handle(filepath_or_buffer, 'r',
encoding=self.encoding,
compression=self.compression)
self.should_close = True
self.open_stream = data
return data
def _combine_lines(self, lines):
"""Combines a list of JSON objects into one JSON object"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']'
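    # Illustrative annotation for _combine_lines: the lines
    # ['{"a": 1}', '{"a": 2}', ''] are combined into '[{"a": 1},{"a": 2}]',
    # which can then be parsed as a single JSON array of records.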
def read(self):
"""Read the whole JSON input into a pandas object"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
obj = self._get_object_parser(
self._combine_lines(self.data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
def _get_object_parser(self, json):
"""parses a json document into a pandas object"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, **kwargs).parse()
return obj
def close(self):
"""
If we opened a stream earlier, in _get_data_from_filepath, we should
close it. If an open stream or file was passed, we leave it open.
"""
if self.should_close:
try:
self.open_stream.close()
except (IOError, AttributeError):
pass
def __next__(self):
lines = list(islice(self.data, self.chunksize))
if lines:
lines_json = self._combine_lines(lines)
obj = self._get_object_parser(lines_json)
# Make sure that the returned objects have the right index.
obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
self.nrows_seen += len(obj)
return obj
self.close()
raise StopIteration
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of {units}'
.format(units=self._STAMP_UNITS))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
.format(bad_keys=pprint_thing(bad_keys)))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
        # don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
            # coerce ints to int64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
        try to coerce objects in epoch/iso formats and
        integers/floats in epoch formats; return a boolean indicating whether
        parsing was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isna(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except:
break
return new_data, True
return data, False
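        # Illustrative annotation: an int64 value such as 1356998400 (epoch
        # seconds for 2013-01-01) passes the range check above and is parsed
        # with to_datetime(..., unit='s'); ISO-formatted strings are handled
        # by to_datetime directly in the same loop.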
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
| apache-2.0 |
Lab603/PicEncyclopedias | jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/grid_search_test.py | 5 | 2016 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(tf.test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
steps=50)
grid_search = GridSearchCV(classifier,
{'hidden_units': [[5, 5], [10, 10]],
'learning_rate': [0.1, 0.01]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
tf.test.main()
| mit |
JT5D/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 3 | 4949 | import sys
import re
import numpy as np
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
"""BernoulliRBM should work on small sparse matrices."""
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
from the same input
"""
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
"""
Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
the same input even when the input is sparse, and test against non-sparse
"""
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
""" just seek if we don't get NaNs sampling the full digits dataset """
rng = np.random.RandomState(42)
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=10,
n_iter=20, random_state=rng)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
def test_score_samples():
"""Test score_samples (pseudo-likelihood) method."""
# Assert that pseudo-likelihood is computed without clipping.
# http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
"""
Make sure RBM works with sparse input when verbose=True
"""
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sio = sys.stdout
sys.stdout = old_stdout
| bsd-3-clause |
PanTomaszRoszczynialski/XRayTracer | backends/raycing/__init__.py | 1 | 21197 | # -*- coding: utf-8 -*-
"""
Package :mod:`~xrt.backends.raycing` provides the internal backend of xrt. It
defines beam sources in the module :mod:`~xrt.backends.raycing.sources`,
rectangular and round apertures in :mod:`~xrt.backends.raycing.apertures`,
optical elements in :mod:`~xrt.backends.raycing.oes`, material properties
(essentially reflectivity, transmittivity and absorption coefficient) for
interfaces and crystals in :mod:`~xrt.backends.raycing.materials` and screens
in :mod:`~xrt.backends.raycing.screens`.
.. _scriptingRaycing:
Coordinate systems
------------------
The following coordinate systems are considered (always right-handed):
1) *The global coordinate system*. It is arbitrary (user-defined) with one
requirement driven by code simplification: Z-axis is vertical. For example,
the system origin of Alba synchrotron is in the center of the ring at the
ground level with Y-axis northward, Z upright and the units in mm. The
positions of all optical elements, sources, screens etc. are given in the
global coordinate system. This feature simplifies the beamline alignment
when 3D CAD models are available.
2) *The local systems*.
a) *of the beamline*. The local Y direction (the direction of the source)
is determined by *azimuth* parameter of
:class:`~xrt.backends.raycing.BeamLine` -- the angle measured cw from the
global Y axis. The local beamline Z is also vertical and upward. The
local beamline X is to the right. At *azimuth* = 0 the global system and
the local beamline system are parallel to each other.
b) *of an optical element*. The origin is on the optical surface. Z is
out-of-surface. At pitch, roll and yaw all zeros the local oe system
and the local beamline system are parallel to each other. Pitch, roll and
yaw rotations (correspondingly: Rx, Ry and Rz) are defined relative to
the local axes of the optical element.
.. note::
The rotations are done in the following default sequence: yaw, roll,
pitch. It can be changed by the user for any particular optical
element. The local axes rotate together with the optical element!
Sometimes it is necessary to define misalignment angles in addition
to the positional angles. Because rotations do not commute, an extra
set of angles may become unavoidable, which are applied after the
positional rotations. See :class:`~xrt.backends.raycing.oes.OE`.
The user-supplied functions for the surface height (z) and the normal as
functions of (x, y) are defined in the local oe system.
c) *of other beamline elements: sources, apertures, screens*. Z is upward
and Y is along the beam line. The origin is given by the user. Usually it
is on the original beam line.
Units
-----
For the internal calculations, lengths are assumed to be in mm, although for
reflection geometries and simple Bragg cases (thick crystals) this convention
is not used. Angles are unitless (radians). Energy is in eV.
For plotting, the user may select units and conversion factors. The latter are
usually automatically deduced from the units.
Beam categories
---------------
xrt discriminates rays by several categories:
a) ``good``: reflected within the working optical surface;
b) ``out``: reflected outside of the working optical surface, i.e. outside of
a metal stripe on a mirror;
c) ``over``: propagated over the surface without intersection;
d) ``dead``: arrived below the optical surface and thus absorbed by the OE.
This distinction simplifies the adjustment of entrance and exit slits. The
user supplies `physical` and `optical` limits, where the latter is used to
define the ``out`` category (for rays between `physical` and `optical` limits).
An alarm is triggered if the fraction of dead rays exceeds a specified level.
Scripting in python
-------------------
The user of :mod:`~xrt.backends.raycing` must do the following:
1) Instantiate class :class:`~xrt.backends.raycing.BeamLine` and fill it with
optical elements -- descendants of class
:class:`~xrt.backends.raycing.oes.OE`.
2) Create a module-level function that returns a dictionary of beams -- the
instances of :class:`~xrt.backends.raycing.sources.Beam`. Assign this
function to the variable `xrt.backends.raycing.run.run_process`.
The beams should be obtained by the methods shine() of a source, expose() of
a screen, reflect() or multiple_reflect() of an optical element, propagate()
of an aperture.
3) Use the keys in this dictionary for creating the plots (instances of
:class:`~xrt.plotter.XYCPlot`). Note that at the time of instantiation the
plots are just empty placeholders for the future 2D and 1D histograms.
4) Run :func:`~xrt.runner.run_ray_tracing()` function for the created plots.
Additionally, the user may define a generator that will run a loop of ray
tracing for changing geometry (mimics a real scan) or for different material
properties etc. The generator should modify the beamline elements and output
file names of the plots before *yield*. After the *yield* the plots are ready
and the generator may use their fields, e.g. *intensity* or *dE* or *dy* or
others to prepare a scan plot. Typically, this sequence is within a loop; after
the loop the user may prepare the final scan plot using matplotlib
functionality. The generator is given to :func:`~xrt.runner.run_ray_tracing()`
as a parameter.
See the supplied examples."""
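# Illustrative sketch of the scripting sequence described in the docstring
# above (added annotation, not part of this module).  Concrete source/screen
# classes and the exact keyword arguments of XYCPlot and run_ray_tracing are
# assumptions -- consult the corresponding submodules and the supplied
# examples for the real signatures.
#
#     import xrt.backends.raycing as raycing
#     import xrt.backends.raycing.run as rrun
#     import xrt.plotter as xrtp
#     import xrt.runner as xrtr
#
#     bl = raycing.BeamLine(azimuth=0., height=0.)
#     # ... create a source, optical elements and a screen here (see the
#     # sources, oes, apertures and screens submodules) ...
#
#     def run_process(beamLine):
#         beamSource = beamLine.sources[0].shine()
#         beamScreen = beamLine.screens[0].expose(beamSource)
#         return {'screen': beamScreen}
#     rrun.run_process = run_process
#
#     plots = [xrtp.XYCPlot('screen')]
#     xrtr.run_ray_tracing(plots, beamLine=bl)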
__module__ = "raycing"
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "10 Apr 2015"
# import copy
import types
import numpy as np
# import psyco #!!!psyco speeds it up!!!
# psyco.full()
# psyco.full(memory=200)
# psyco.profile(0.05, memory=200)
elec_e = 1.602176462e-19
zEps = 1e-12 # mm: target accuracy in z while searching for intersection
misalignmentTolerated = 0.1 # for automatic checking of oe center position
accuracyInPosition = 0.1 # accuracy for positioning of oe
dt = 1e-3 # mm: margin around OE within which the intersection is searched
ds = 0. # mm: margin used in multiple reflections
nrays = 100000
maxIteration = 100 # max number of iterations while searching for intersection
maxHalfSizeOfOE = 1000.
maxDepthOfOE = 100.
# maxZDeviationAtOE = 100.
# colors of the rays in a 0-10 range (red-violet)
hueGood = 3.
hueOut = 8.
hueOver = 1.6
hueDead = 0.2
hueMin = 0.
hueMax = 10.
def is_sequence(arg):
"""Checks whether *arg* is a sequence."""
result = (not hasattr(arg, "strip") and hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
if result:
try:
arg[0]
except IndexError:
result = False
return result
def distance_xy(p1, p2):
"""Calculates 2D distance between p1 and p2. p1 and p2 are vectors of
length >= 2."""
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5
def distance_xyz(p1, p2):
"""Calculates 2D distance between p1 and p2. p1 and p2 are vectors of
length >= 3."""
return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)**0.5
def rotate_x(y, z, cosangle, sinangle):
"""3D rotaion around *x* (pitch). *y* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *yNew, zNew*."""
return cosangle*y - sinangle*z, sinangle*y + cosangle*z
def rotate_y(x, z, cosangle, sinangle):
"""3D rotaion around *y* (roll). *x* and *z* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, zNew*."""
return cosangle*x + sinangle*z, -sinangle*x + cosangle*z
def rotate_z(x, y, cosangle, sinangle):
"""3D rotaion around *z*. *x* and *y* are values or arrays.
Positive rotation is for positive *sinangle*. Returns *xNew, yNew*."""
return cosangle*x - sinangle*y, sinangle*x + cosangle*y
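# Illustrative annotation: rotating the point (x, y) = (1, 0) by +90 degrees
# about z with rotate_z gives (0, 1):
#
#     cA, sA = np.cos(np.pi / 2), np.sin(np.pi / 2)
#     rotate_z(1., 0., cA, sA)   # -> (~0., 1.)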
def rotate_beam(beam, indarr=None, rotationSequence='RzRyRx',
pitch=0, roll=0, yaw=0, skip_xyz=False, skip_abc=False):
"""Rotates the *beam* indexed by *indarr* by the angles *yaw, roll, pitch*
in the sequence given by *rotationSequence*. A leading '-' symbol of
*rotationSequence* reverses the sequences.
"""
angles = {'z': yaw, 'y': roll, 'x': pitch}
rotates = {'z': rotate_z, 'y': rotate_y, 'x': rotate_x}
if not skip_xyz:
coords1 = {'z': beam.x, 'y': beam.x, 'x': beam.y}
coords2 = {'z': beam.y, 'y': beam.z, 'x': beam.z}
if not skip_abc:
vcomps1 = {'z': beam.a, 'y': beam.a, 'x': beam.b}
vcomps2 = {'z': beam.b, 'y': beam.c, 'x': beam.c}
if rotationSequence[0] == '-':
seq = rotationSequence[6] + rotationSequence[4] + rotationSequence[2]
else:
seq = rotationSequence[1] + rotationSequence[3] + rotationSequence[5]
for s in seq:
angle, rotate = angles[s], rotates[s]
if not skip_xyz:
c1, c2 = coords1[s], coords2[s]
if not skip_abc:
v1, v2 = vcomps1[s], vcomps2[s]
if angle != 0:
cA = np.cos(angle)
sA = np.sin(angle)
if indarr is None:
indarr = slice(None)
if not skip_xyz:
c1[indarr], c2[indarr] = rotate(c1[indarr], c2[indarr], cA, sA)
if not skip_abc:
v1[indarr], v2[indarr] = rotate(v1[indarr], v2[indarr], cA, sA)
def rotate_point(point, rotationSequence='RzRyRx', pitch=0, roll=0, yaw=0):
"""Rotates the *point* (3-sequence) by the angles *yaw, roll, pitch*
in the sequence given by *rotationSequence*. A leading '-' symbol of
*rotationSequence* reverses the sequences.
"""
angles = {'z': yaw, 'y': roll, 'x': pitch}
rotates = {'z': rotate_z, 'y': rotate_y, 'x': rotate_x}
ind1 = {'z': 0, 'y': 0, 'x': 1}
ind2 = {'z': 1, 'y': 2, 'x': 2}
newp = [coord for coord in point]
if rotationSequence[0] == '-':
seq = rotationSequence[6] + rotationSequence[4] + rotationSequence[2]
else:
seq = rotationSequence[1] + rotationSequence[3] + rotationSequence[5]
for s in seq:
angle, rotate = angles[s], rotates[s]
if angle != 0:
cA = np.cos(angle)
sA = np.sin(angle)
newp[ind1[s]], newp[ind2[s]] = rotate(
newp[ind1[s]], newp[ind2[s]], cA, sA)
return newp
def global_to_virgin_local(bl, beam, lo, center=None, part=None):
"""Transforms *beam* from the global to the virgin (i.e. with pitch, roll
and yaw all zeros) local system. The resulting local beam is *lo*. If
*center* is provided, the rotation Rz is about it, otherwise is about the
origin of *beam*. The beam arrays can be sliced by *part* indexing array.
*bl* is an instance of :class:`BeamLine`"""
if part is None:
part = np.ones(beam.x.shape, dtype=np.bool)
a0, b0 = bl.sinAzimuth, bl.cosAzimuth
if center is not None:
lo.x[part] = beam.x[part] - center[0]
lo.y[part] = beam.y[part] - center[1]
lo.z[part] = beam.z[part] - center[2]
else:
lo.x[part] = beam.x[part]
lo.y[part] = beam.y[part]
lo.z[part] = beam.z[part]
if a0 == 0:
lo.a[part] = beam.a[part]
lo.b[part] = beam.b[part]
else:
lo.x[part], lo.y[part] = rotate_z(lo.x[part], lo.y[part], b0, a0)
lo.a[part], lo.b[part] = rotate_z(beam.a[part], beam.b[part], b0, a0)
lo.c[part] = beam.c[part] # unchanged
def virgin_local_to_global(bl, vlb, center=None, part=None):
"""Transforms *vlb* from the virgin (i.e. with pitch, roll and yaw all
zeros) local to the global system and overwrites the result to *vlb*. If
*center* is provided, the rotation Rz is about it, otherwise is about the
origin of *beam*. The beam arrays can be sliced by *part* indexing array.
*bl* is an instance of :class:`BeamLine`"""
if part is None:
part = np.ones(vlb.x.shape, dtype=np.bool)
a0, b0 = bl.sinAzimuth, bl.cosAzimuth
if a0 != 0:
vlb.a[part], vlb.b[part] = rotate_z(vlb.a[part], vlb.b[part], b0, -a0)
vlb.x[part], vlb.y[part] = rotate_z(vlb.x[part], vlb.y[part], b0, -a0)
if center is not None:
vlb.x[part] += center[0]
vlb.y[part] += center[1]
vlb.z[part] += center[2]
def check_alarm(self, incoming, beam):
"""Appends an alarm string to the list of beamline alarms if the alarm
condition is fulfilled."""
incomingSum = incoming.sum()
if incomingSum > 0:
badSum = (beam.state == self.lostNum).sum()
ratio = float(badSum)/incomingSum
if ratio > self.alarmLevel:
            alarmStr = ('{0}{1} absorbs {2:.2%} of rays ' +
'at {3:.0%} alarm level!').format(
'Alarm! ', self.name, ratio, self.alarmLevel)
self.bl.alarms.append(alarmStr)
else:
self.bl.alarms.append('no incident rays to {0}!'.format(self.name))
# print self.name
# raise
def get_x(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.x
def get_y(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.y
def get_z(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.z
def get_s(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.s
def get_phi(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.phi
def get_r(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.r
def get_xprime(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.a / beam.b
def get_zprime(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.c / beam.b
def get_path(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.path
def get_order(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.order if hasattr(beam, 'order') else np.ones_like(beam.state)
def get_energy(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.E
def get_reflection_number(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.nRefl # if hasattr(beam, 'nRefl') else beam.state
def get_elevation_d(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationD
# if hasattr(beam, 'elevationD') else np.zeros_like(beam.x)
def get_elevation_x(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationX # if hasattr(beam, 'elevationX') else beam.x
def get_elevation_y(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationY # if hasattr(beam, 'elevationY') else beam.y
def get_elevation_z(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.elevationZ # if hasattr(beam, 'elevationZ') else beam.z
def get_Es_amp(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.abs(beam.Es)
def get_Ep_amp(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.abs(beam.Ep)
def get_Es_phase(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Es)
# return np.arctan2(beam.Es.imag, beam.Es.real)
def get_Ep_phase(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Ep)
# return np.arctan2(beam.Ep.imag, beam.Ep.real)
def get_polarization_degree(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
I = (beam.Jss + beam.Jpp)
I[I <= 0] = 1.
pd = np.sqrt((beam.Jss-beam.Jpp)**2 + 4.*abs(beam.Jsp)**2) / I
pd[I <= 0] = 0.
return pd
def get_ratio_ellipse_axes(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
dI2 = (beam.Jss - beam.Jpp)**2
return 2. * beam.Jsp.imag /\
(np.sqrt(dI2 + 4*abs(beam.Jsp)**2) + np.sqrt(dI2 + 4*beam.Jsp.real**2))
def get_circular_polarization_rate(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
I = (beam.Jss + beam.Jpp)
I[I <= 0] = 1.
cpr = 2. * beam.Jsp.imag / I
cpr[I <= 0] = 0.
return cpr
def get_polarization_psi(beam):
"""Angle between the semimajor axis of the polarization ellipse relative to
the s polarization. Used for retrieving data for x-, y- or c-axis of a
plot."""
# return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp) * 180 / np.pi
return 0.5 * np.arctan2(2.*beam.Jsp.real, beam.Jss-beam.Jpp)
def get_phase_shift(beam): # in units of pi!
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return np.angle(beam.Jsp) / np.pi
def get_incidence_angle(beam):
"""Used for retrieving data for x-, y- or c-axis of a plot."""
return beam.theta if hasattr(beam, 'theta') else np.zeros_like(beam.x)
get_theta = get_incidence_angle
def get_output(plot, beamsReturnedBy_run_process):
"""Used by :mod:`multipro` for creating images of *plot* - instance of
:class:`XYCPlot`. *beamsReturnedBy_run_process* is a dictionary of
:class:`Beam` instances returned by user-defined :func:`run_process`.
:func:`get_output` creates an indexing array corresponding to the requested
properties of rays in *plot*. It also calculates the number of rays with
various properties defined in `raycing` backend.
"""
beam = beamsReturnedBy_run_process[plot.beam]
if plot.beamState is None:
beamState = beam.state
else:
beamState = beamsReturnedBy_run_process[plot.beamState].state
nrays = len(beam.x)
locAlive = (beamState > 0).sum()
part = np.zeros(nrays, dtype=np.bool)
locGood = 0
locOut = 0
locOver = 0
locDead = 0
for rayFlag in plot.rayFlag:
locPart = beamState == rayFlag
if rayFlag == 1:
locGood = locPart.sum()
if rayFlag == 2:
locOut = locPart.sum()
if rayFlag == 3:
locOver = locPart.sum()
if rayFlag < 0:
locDead += locPart.sum()
part = part | locPart
if hasattr(beam, 'accepted'):
locAccepted = beam.accepted
locAcceptedE = beam.acceptedE
locSeeded = beam.seeded
locSeededI = beam.seededI
else:
locAccepted = 0
locAcceptedE = 0
locSeeded = 0
locSeededI = 0
if hasattr(beam, 'displayAsAbsorbedPower'):
plot.displayAsAbsorbedPower = True
if isinstance(plot.xaxis.data, types.FunctionType):
x = plot.xaxis.data(beam) * plot.xaxis.factor
elif isinstance(plot.xaxis.data, np.ndarray):
x = plot.xaxis.data * plot.xaxis.factor
else:
raise ValueError('cannot find data for x!')
if isinstance(plot.yaxis.data, types.FunctionType):
y = plot.yaxis.data(beam) * plot.yaxis.factor
elif isinstance(plot.yaxis.data, np.ndarray):
y = plot.yaxis.data * plot.yaxis.factor
else:
raise ValueError('cannot find data for y!')
if plot.caxis.useCategory:
cData = np.zeros_like(beamState)
cData[beamState == 1] = hueGood
cData[beamState == 2] = hueOut
cData[beamState == 3] = hueOver
cData[beamState < 0] = hueDead
flux = np.ones_like(x)
else:
if plot.beamC is None:
beamC = beam
else:
beamC = beamsReturnedBy_run_process[plot.beamC]
if isinstance(plot.caxis.data, types.FunctionType):
cData = plot.caxis.data(beamC) * plot.caxis.factor
elif isinstance(plot.caxis.data, np.ndarray):
cData = plot.caxis.data * plot.caxis.factor
else:
raise ValueError('cannot find data for cData!')
if plot.fluxKind.startswith('power'):
flux = ((beam.Jss + beam.Jpp) *
beam.E * beam.accepted / beam.seeded * elec_e)
elif plot.fluxKind.startswith('s'):
flux = beam.Jss
elif plot.fluxKind.startswith('p'):
flux = beam.Jpp
elif plot.fluxKind.startswith('+-45'):
flux = 2*beam.Jsp.real
elif plot.fluxKind.startswith('left-right'):
flux = 2*beam.Jsp.imag
else:
flux = beam.Jss + beam.Jpp
return x[part], y[part], flux[part], cData[part], nrays, locAlive,\
locGood, locOut, locOver, locDead, locAccepted, locAcceptedE,\
locSeeded, locSeededI
class BeamLine(object):
"""Container class for beamline components. It also defines the beam line
direction and height"""
def __init__(self, azimuth=0., height=0.):
"""
*azimuth* is counted in cw direction from the global Y axis. At
*azimuth* = 0 the local Y coincides with the global Y."""
self.azimuth = azimuth
self.sinAzimuth = np.sin(azimuth) # a0
self.cosAzimuth = np.cos(azimuth) # b0
self.height = height
self.sources = []
self.oes = []
self.slits = []
self.screens = []
self.alarms = []
| mit |
b09dan/universities_sentiment | text_class/pynlc/text_classifier.py | 1 | 4379 | import copy
import itertools
import numpy
from sklearn.metrics import mean_squared_error
from .classifier import Classifier
from .text_processor import TextProcessor
class TextClassifier:
"""
Text classifier class
"""
def __init__(self, text_processor, backend=None, classes=None, backend_extra_args=None):
"""
Initialize classifier
:param text_processor: text processor
:type text_processor: TextProcessor
:param backend: pretrained network configuration or None
:type backend: dict|NoneType
:param classes: class names list (for pretrained classifier) or None
:type classes: list[str]|NoneType
:param backend_extra_args: Backend optional arguments. \
See filter_sizes, nb_filter, hidden_size on Classifier
:type backend_extra_args: NoneType|dict
"""
self.backend_extra_args = backend_extra_args
if backend is not None:
backend_config = copy.deepcopy(backend)
if backend_extra_args is not None:
for key, value in backend_extra_args.items():
backend_config[key] = value
self.backend = Classifier(**backend_config)
else:
self.backend = None
self.text_processor = text_processor
self.classes = classes
@property
def config(self):
"""
Get configuration dictionary
:return: config
:rtype: dict
"""
return {
"backend": self.backend.config,
"classes": self.classes
}
def train(self, texts, classes, epochs, verbose=False, validation_split=0.3, callbacks=[]):
"""
Train on given texts
:param texts: texts
:type texts: list[str]
:param classes: class names (one list for one texts item)
:type classes: list[list[str]]
:param epochs: epochs count
:type epochs: int
        :param verbose: print training progress?
:type verbose: bool
:param validation_split: validation split (0 <= x <= 1)
:type validation_split: float
:param callbacks: Train callbacks (e.g. early stopping)
"""
self.classes = list(set(itertools.chain(*classes)))
self.classes.sort()
matrixes = [self.text_processor.matrix(text)
for text in texts]
vector_count = 2 * max([len(item) for item in matrixes])
vector_size = matrixes[0].shape[1]
labels = numpy.array([[int(class_name in text_classes) for class_name in self.classes]
for text_classes in classes])
backend_config = {}
if self.backend_extra_args is not None:
for key, value in self.backend_extra_args.items():
backend_config[key] = value
self.backend = Classifier(vector_count, vector_size, len(self.classes), **backend_config)
self.backend.train(matrixes, labels, epochs, verbose,
validation_split=validation_split, callbacks=callbacks)
def predict(self, texts):
"""
Predict classes
:param texts: texts
:type texts: list[str]
:return: classification results (one per one texts item)
:rtype: list[dict[str, float]]
"""
matrixes = [self.text_processor.matrix(text)
for text in texts]
labels = self.backend.predict(matrixes)
result = []
for row in labels:
data = {}
for i, class_name in enumerate(self.classes):
data[class_name] = float(row[i])
result.append(data)
return result
def error(self, texts, right_classes):
"""
Calculate classification error
:param texts: texts
:type texts: list[str]
:param right_classes: classes
:type right_classes: list[list[str]]
:return: error
:rtype: float
"""
prediction = self.predict(texts)
right_values = [[int(class_name in item_classes) for class_name in self.classes]
for item_classes in right_classes]
values = [[item_classes[class_name] for class_name in self.classes]
for item_classes in prediction]
return mean_squared_error(numpy.array(right_values), numpy.array(values))
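# --- Hedged usage sketch (not part of the original module) ---
# The TextProcessor constructor is defined elsewhere, so its arguments below
# are an assumption; the classifier calls follow the docstrings above.
#   processor = TextProcessor(...)            # hypothetical setup
#   clf = TextClassifier(processor)
#   clf.train(["great campus", "poor dorms"],
#             [["positive"], ["negative"]], epochs=5)
#   clf.predict(["friendly staff"])  # -> [{"positive": 0.x, "negative": 0.y}]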
| mit |
mngad/findStiffness | plotStiffness.py | 1 | 1198 |
import os.path
import findStiffness as fs
import matplotlib.pyplot as plt
# ---------------
numspecs = 1
direc = 'M:\Compression_tests_12_that_were_used/PostVP'
# ---------------
def plot(specs, file_type):
data = fs.findStiffness(10, 1, True, direc)
per_spec = int((len(data) - 1) / specs)
for i in range(0, specs):
fig = plt.figure()
title = str(data[i * per_spec][2][:9]).replace('_', ' ')
fig.suptitle(title, fontsize=22)
ax = fig.add_axes([0.1, 0.1, 0.6, 0.75])
for a in range(per_spec):
ax.plot(
data[(i * per_spec) + a][0],
data[(i * per_spec) + a][1],
label=str(data[i * per_spec + a][2][10:-16]),
linewidth=2)
# print(data[i * per_spec + a][2])
# print('i = ' + str(i) + ', a = ' + str(a))
ax.legend(
bbox_to_anchor=(1.05, 1.),
loc=2,
borderaxespad=0.,
fontsize=9)
plt.xlabel('Displacement (mm)')
plt.ylabel('Load (N)')
plt.grid(True)
# plt.show()
fname = direc + '/' + str(title) + '.' + str(file_type)
if os.path.isfile(fname):
print("File exists already")
else:
plt.savefig(fname, bbox_inches='tight')
# print('\n')
plt.close()
if __name__ == "__main__":
plot(numspecs, 'pdf')
plot(numspecs, 'png')
| gpl-3.0 |
ChenglongChen/Kaggle_HomeDepot | Code/Igor&Kostia/word2vec.py | 1 | 8654 | # -*- coding: utf-8 -*-
"""
Code for calculating word2vec features.
Competition: HomeDepot Search Relevance
Author: Kostia Omelianchuk
Team: Turing test
"""
from config_IgorKostia import *
import gensim
import logging
import numpy as np
from sklearn.ensemble import RandomForestRegressor, BaggingRegressor, GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from nltk.stem.snowball import SnowballStemmer, PorterStemmer
import nltk
from time import time
import re
import os
import math as m
import pandas as pd
from gensim import models
#loading data
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
df_all=pd.read_csv(PROCESSINGTEXT_DIR+"/df_train_and_test_processed.csv", encoding="ISO-8859-1")
df_all1=pd.read_csv(PROCESSINGTEXT_DIR+"/df_product_descriptions_processed.csv", encoding="ISO-8859-1")
df_all2 = pd.merge(df_all, df_all1, how="left", on="product_uid")
df_all = df_all2
df_all1=pd.read_csv(PROCESSINGTEXT_DIR+"/df_attribute_bullets_processed.csv", encoding="ISO-8859-1")
df_all2 = pd.merge(df_all, df_all1, how="left", on="product_uid")
df_all = df_all2
df_attr = pd.read_csv(PROCESSINGTEXT_DIR+'/df_attributes_kostia.csv', encoding="ISO-8859-1")
df_all = pd.merge(df_all, df_attr, how='left', on='product_uid')
#replace nan
p = df_all.keys()
for i in range(len(p)):
print p[i]
def replace_nan(s):
if pd.isnull(s)==True:
s=""
return s
df_all['search_term_stemmed'] = df_all['search_term_stemmed'].map(lambda x:replace_nan(x))
df_all['product_title_stemmed'] = df_all['product_title_stemmed'].map(lambda x:replace_nan(x))
df_all['product_description_stemmed'] = df_all['product_description_stemmed'].map(lambda x:replace_nan(x))
df_all['brand_parsed'] = df_all['brand_parsed'].map(lambda x:replace_nan(x))
df_all['material_parsed'] = df_all['material_parsed'].map(lambda x:replace_nan(x))
df_all['attribute_bullets_stemmed'] = df_all['attribute_bullets_stemmed'].map(lambda x:replace_nan(x))
df_all['attribute_stemmed'] = df_all['attribute_stemmed'].map(lambda x:replace_nan(x))
df_all['search_term'] = df_all['search_term'].map(lambda x:replace_nan(x))
df_all['product_title'] = df_all['product_title'].map(lambda x:replace_nan(x))
df_all['product_description'] = df_all['product_description'].map(lambda x:replace_nan(x))
df_all['brand'] = df_all['brand'].map(lambda x:replace_nan(x))
df_all['material'] = df_all['material'].map(lambda x:replace_nan(x))
df_all['attribute_bullets'] = df_all['attribute_bullets'].map(lambda x:replace_nan(x))
df_all['value'] = df_all['value'].map(lambda x:replace_nan(x))
#build a set of sentences in 4 ways
st = df_all["search_term_stemmed"]
pt = df_all["product_title_stemmed"]
pd = df_all["product_description_stemmed"]
br = df_all["brand_parsed"]
mr = df_all["material_parsed"]
ab = df_all["attribute_bullets_stemmed"]
at = df_all["attribute_stemmed"]
#st + pt +pd vocab
t = list()
for i in range(len(st)):
p = st[i].split()
t.append(p)
for i in range(len(pt)):
p = pt[i].split()
t.append(p)
for i in range(len(pd)):
p = pd[i].split()
t.append(p)
for i in range(len(ab)):
p = ab[i].split()
t.append(p)
for i in range(len(at)):
p = at[i].split()
t.append(p)
print "first vocab"
#st conc pt conc pd vocab
t1 = list()
for i in range(len(st)):
p = st[i].split()+pt[i].split()+pd[i].split()+br[i].split()+mr[i].split()+ab[i].split()+at[i].split()
t1.append(p)
print "second vocab"
#st + pt +pd +br + mr vocab w/o pars
st1 = df_all["search_term"]
pt1 = df_all["product_title"]
pd1 = df_all["product_description"]
br1 = df_all["brand"]
mr1 = df_all["material"]
ab1 = df_all["attribute_bullets"]
at1 = df_all["value"]
t2 = list()
for i in range(len(st)):
p = st1[i].split()
t2.append(p)
for i in range(len(pt)):
p = pt1[i].split()
t2.append(p)
for i in range(len(pd)):
p = pd1[i].split()
t2.append(p)
for i in range(len(ab1)):
p = ab1[i].split()
t2.append(p)
for i in range(len(at1)):
p = at1[i].split()
t2.append(p)
print "third vocab"
#st conc pt conc pd conc br conc mr vocab w/o pars
t3 = list()
for i in range(len(st)):
p = st1[i].split()+pt1[i].split()+pd1[i].split()+br1[i].split()+mr1[i].split()+ab1[i].split()+at1[i].split()
t3.append(p)
print "fourth vocab"
#train models
model0 = gensim.models.Word2Vec(t, sg=1, window=10, sample=1e-5, negative=5, size=300)
model1 = gensim.models.Word2Vec(t1, sg=1, window=10, sample=1e-5, negative=5, size=300)
model2 = gensim.models.Word2Vec(t2, sg=1, window=10, sample=1e-5, negative=5, size=300)
model3 = gensim.models.Word2Vec(t3, sg=1, window=10, sample=1e-5, negative=5, size=300)
#model4 = gensim.models.Word2Vec(t, sg=0, hs=1, window=10, size=300)
#model5 = gensim.models.Word2Vec(t1, sg=0, hs=1,window=10, size=300)
#model6 = gensim.models.Word2Vec(t2, sg=0, hs=1, window=10, size=300)
#model7 = gensim.models.Word2Vec(t3, sg=0, hs=1,window=10, size=300)
print "model prepared"
#for each model calculate features: n_similarity between st and the other text fields
model_list=[model0,model1,model2,model3] #,model4 ,model5,model6,model7]
n_sim=list()
for model in model_list:
n_sim_pt=list()
for i in range(len(st)):
w1=st[i].split()
w2=pt[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_pt.append(0)
else:
n_sim_pt.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_pt)
n_sim_pd=list()
for i in range(len(st)):
w1=st[i].split()
w2=pd[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_pd.append(0)
else:
n_sim_pd.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_pd)
n_sim_at=list()
for i in range(len(st)):
w1=st[i].split()
w2=at[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_at.append(0)
else:
n_sim_at.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_at)
n_sim_all=list()
for i in range(len(st)):
w1=st[i].split()
w2=pt[i].split()+pd[i].split()+br[i].split()+mr[i].split()+ab[i].split()+at[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_all.append(0)
else:
n_sim_all.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_all)
n_sim_all1=list()
for i in range(len(st)):
w1=st1[i].split()
w2=pt1[i].split()+pd1[i].split()+br1[i].split()+mr1[i].split()+ab1[i].split()+at1[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_all1.append(0)
else:
n_sim_all1.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_all1)
n_sim_ptpd=list()
for i in range(len(st)):
w1=pt[i].split()
w2=pd[i].split()
d1=[]
d2=[]
for j in range(len(w1)):
if w1[j] in model.vocab:
d1.append(w1[j])
for j in range(len(w2)):
if w2[j] in model.vocab:
d2.append(w2[j])
if d1==[] or d2==[]:
n_sim_ptpd.append(0)
else:
n_sim_ptpd.append(model.n_similarity(d1,d2))
n_sim.append(n_sim_ptpd)
print "model features done"
st_names=["id"]
for j in range(len(n_sim)):
df_all["word2vec_"+str(j)]=n_sim[j]
st_names.append("word2vec_"+str(j))
#save features
b=df_all[st_names]
b.to_csv(FEATURES_DIR+"/df_word2vec_new.csv", index=False)
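# --- Hedged illustration (not part of the original script) ---
# Every similarity feature above follows the same vocabulary-filtered pattern;
# the helper below is a minimal sketch of that pattern, written against the
# old-style gensim API (model.vocab, model.n_similarity) used in this file.
def filtered_n_similarity(model, words_a, words_b):
    a = [w for w in words_a if w in model.vocab]
    b = [w for w in words_b if w in model.vocab]
    if a == [] or b == []:
        return 0
    return model.n_similarity(a, b)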
| mit |
ArcticWarriors/snobot-2016 | LoggerAnalyzer2016/LoggerAnalyzer/LoggerTool.py | 1 | 6354 | '''
Created on Jan 29, 2016
@author: 1335draco
'''
import numpy
import time
import os
import matplotlib.pyplot as plt
import Tkinter, tkFileDialog
from read_csv_file import load_csv_file
def parse_datetime(time_string):
# strptime has no option for milliseconds, so ignore the last 3 characters
time_struct = time.strptime(time_string[:-3], "%Y%m%d_%H%M%S")
hour = time_struct.tm_hour
minute = time_struct.tm_min
sec = time_struct.tm_sec
    msec = int(time_string[-3:]) # ms are the last three characters
return (msec + (sec * 1000) + (minute * 60000) + (hour * 3600000)) / 1000.0
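# Hedged example (illustrative only): parse_datetime("20160129_054531870")
# returns 5*3600 + 45*60 + 31.870 = 20731.87, i.e. seconds since midnight.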
def plot_driver_joysticks(converted_dict):
print converted_dict.keys()
tank_right = converted_dict["RightMotorSpeed"]
tank_left = converted_dict["LeftMotorSpeed"]
x_axis = numpy.arange(len(tank_right))
plt.subplot(2, 1, 1)
plt.title('Driver Joysticks')
plt.ylabel('Tank Right')
plt.scatter(x_axis, tank_right, c=tank_right, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors='None')
plt.colorbar()
plt.subplot(2, 1, 2)
plt.ylabel('Tank Left')
plt.scatter(x_axis, tank_left, c=tank_left, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.colorbar()
def plot_encoders(converted_dict):
left_encoder = converted_dict["LeftEncoderDistance"]
right_encoder = converted_dict["RightEncoderDistance"]
x_axis = numpy.arange(len(left_encoder))
plt.subplot(2,1,1)
plt.title("Encoder Distances")
plt.ylabel("Left Encoder")
plt.scatter(x_axis, left_encoder, c = "g", marker = "o", edgecolors = 'None')
plt.subplot(2,1,2)
plt.ylabel("Right Encoder")
plt.scatter(x_axis, right_encoder, c = "g", marker = "o", edgecolors = 'None')
def plot_scale_tilt(converted_dict):
scale_tilt_motor_speed = converted_dict["ScaleTiltMotorSpeed"]
scale_tilt_angle = converted_dict["ScaleTiltAngle"]
x_axis = numpy.arange(len(scale_tilt_motor_speed))
# x_axis2 = numpy.arange(90)
plt.subplot(4,1,1)
plt.title("Scaling")
plt.ylabel("Scaling Motor Speed")
plt.scatter(x_axis, scale_tilt_motor_speed, c=scale_tilt_motor_speed, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(4,1,2)
plt.ylabel("Angle")
plt.scatter(x_axis, scale_tilt_angle, c=scale_tilt_angle, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plot_scaling_mechanism(converted_dict)
def plot_extension(converted_dict):
extension_motor_speed = converted_dict["ScaleExtensionMotorSpeed"]
extension_percentage = converted_dict["PercentageExtended"]
extension_pot_voltage = converted_dict["ExtensionPotVoltage"]
x_axis = numpy.arange(len(extension_pot_voltage))
plt.subplot(3,1,1)
plt.title("Extension")
plt.ylabel("Percentage")
plt.scatter(x_axis, extension_percentage, c=extension_percentage, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(3,1,2)
plt.ylabel("EVoltage")
plt.scatter(x_axis, extension_pot_voltage, c=extension_pot_voltage, marker="o", cmap=plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(3,1,3)
plt.ylabel("Motor Speed")
plt.scatter(x_axis,extension_motor_speed, c = extension_motor_speed , marker="o", cmap = plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
def plot_scaling_mechanism(converted_dict):
scaling_mechanism_up = converted_dict["IsScalingMechanismUp"]
scaling_mechanism_down = converted_dict["IsScalingMechanismDown"]
# limit_switch_activated = numpy.logical_or(scaling_mechanism_up, scaling_mechanism_down)
plt.subplot(4, 1, 3)
plt.ylabel('Scaling Up')
plt.plot(scaling_mechanism_up, c='b', marker=".")
plt.subplot(4, 1, 4)
plt.ylabel('Scaling Down')
plt.plot(scaling_mechanism_down, c='b', marker=".")
# plt.subplot(4, 2, 7)
# plt.ylabel('A Switch Hit')
# plt.plot(limit_switch_activated, c='b', marker="o")
def plot_harvester(converted_dict):
pivot_speed = converted_dict["PivotSpeed"]
roller_speed = converted_dict["RollerSpeed"]
harvester_pot_voltage = converted_dict["HarvesterPotVoltage"]
harvester_percentage = converted_dict["HarvesterPercentage"]
x_axis = numpy.arange(len(pivot_speed))
plt.subplot(4, 1, 1)
plt.title("Harvester")
plt.ylabel("Pivot Speed")
plt.scatter(x_axis, pivot_speed, c=pivot_speed, marker = "o", cmap = plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(4, 1, 2)
plt.ylabel("Roller Speed")
plt.scatter(x_axis, roller_speed, c=roller_speed, marker = "o", cmap = plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(4,1,3)
plt.ylabel("HVoltage")
plt.scatter(x_axis, harvester_pot_voltage, c= harvester_pot_voltage, marker = "o", cmap = plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
plt.subplot(4,1,4)
plt.ylabel("Harvester Percentage")
plt.scatter(x_axis, harvester_percentage, c= harvester_percentage, marker = "o", cmap = plt.get_cmap("gist_rainbow_r"), edgecolors = "None")
def plot_dt(converted_dict):
the_time = converted_dict['Date and Time']
dt = the_time[1:] - the_time[:-1]
plt.title('delta time')
plt.plot(dt, c='b', marker=".")
def main():
root = Tkinter.Tk()
root.withdraw()
file_name = tkFileDialog.askopenfilename(filetypes = (("All files", "*"), ("Template files", "*.type")))
# file_name = "Team 174 Robot Logs - RobotLog_20150210_054531870_log.csv"
converted_dict = load_csv_file(file_name, {'Date and Time': parse_datetime})
image_dir = "images/"
if not os.path.exists(image_dir):
os.mkdir(image_dir)
plt.figure(1)
plot_driver_joysticks(converted_dict)
plt.savefig(image_dir + "DriverSticks.png")
plt.figure(2)
plot_encoders(converted_dict)
plt.savefig(image_dir + "Encoders.png")
plt.figure(3)
plot_scale_tilt(converted_dict)
plt.savefig(image_dir + "Scaling.png")
plt.figure(4)
plot_extension(converted_dict)
plt.savefig(image_dir + "Extension.png")
plt.figure(5)
plot_harvester(converted_dict)
plt.savefig(image_dir + "Harvester.png")
plt.show()
if __name__ == "__main__":
main()
| mit |
nmayorov/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 24 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# Licence: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
sumspr/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-inverse computations
# for eigenvalues by providing a shift (sigma) and a solver.
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
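# Hedged note (illustration, not part of the upstream file): in shift-invert
# mode the solvers below obtain the [A - sigma*M]^-1 action through
# get_OPinv_matvec(A, M, sigma, ...), which dispatches to a dense LU factor,
# a sparse splu factorization, or an iterative gmres solve depending on the
# types of A and M, as implemented above.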
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        The v[i] is the eigenvector corresponding to the eigenvalue w[i]
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
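# A minimal shift-invert sketch for the symmetric solver (assuming the public
# `eigsh` alias defined at the end of this module; the diagonal test matrix is
# purely illustrative):
#
#     >>> from sklearn.utils.arpack import eigsh
#     >>> import numpy as np
#     >>> D = np.diag(np.arange(1.0, 14.0))     # eigenvalues 1, 2, ..., 13
#     >>> vals, vecs = eigsh(D, k=3, sigma=0)   # eigenvalues nearest sigma=0
#     >>> sorted(np.round(vals))
#     [1.0, 2.0, 3.0]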
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
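# A minimal usage sketch for the SVD backport (assuming the public `svds`
# alias defined at the end of this module; the random test matrix is purely
# illustrative):
#
#     >>> from sklearn.utils.arpack import svds
#     >>> import numpy as np
#     >>> A = np.random.RandomState(0).rand(9, 6)
#     >>> u, s, vh = svds(A, k=3)
#     >>> u.shape, s.shape, vh.shape
#     ((9, 3), (3,), (3, 6))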
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
| bsd-3-clause |
vvvityaaa/PyImgProcess | template_matching/champfer_matching.py | 1 | 2355 | from PIL import Image
from template_matching.distance_transformation import distance_transformation
import numpy as np
import matplotlib.pyplot as plt
import time
import math
from open_image import open_image
def champfer_matching(path, template_path):
'''
    Chamfer matching algorithm - compares two binary images using a distance
    transform based on the Manhattan distance.
    :param path: path to the image
    :param template_path: path to the template
    :return: resulting image; minima of this map mark positions where the template matches best
'''
# initializing image and other helping variables
image_array = open_image(path)
template_array = open_image(template_path)
image_height, image_width = image_array.shape
template_height, template_width = template_array.shape
half_template_height = math.floor(template_height / 2)
half_template_width = math.floor(template_width / 2)
distance_transformation_matrix = distance_transformation(path)
resulting_img = np.zeros((image_height-template_height+1) * (image_width-template_width+1)).reshape(image_height-template_height+1, image_width-template_width+1)
quantity_of_foreground_pix = 0 # quantity of foreground pixels in template
    # count foreground (zero-valued) pixels over the full template
    for i in range(template_height):
        for j in range(template_width):
            if template_array[i, j] == 0:
                quantity_of_foreground_pix += 1
    # iterating over every valid placement of the template in the image
    for i in range(image_height - template_height + 1):
        for j in range(image_width - template_width + 1):
value = 0
for k in range(template_height):
for m in range(template_width):
# summing up values in distance transformation matrix
if template_array[k, m] == 0:
value += distance_transformation_matrix[i+k, j+m]
if quantity_of_foreground_pix != 0:
resulting_img[i, j] = value/quantity_of_foreground_pix
return resulting_img
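# A hypothetical follow-up step (names below are illustrative only): the best
# match position is the location of the minimum chamfer distance, e.g.
#   row, col = np.unravel_index(np.argmin(champ_img), champ_img.shape)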
if __name__ == "__main__":
algorithm_laufzeit = time.time()
champ_img = champfer_matching('../img/binary_lena.png', '../img/template_binary_lena.png')
algorithm_laufzeit = time.time() - algorithm_laufzeit
print(algorithm_laufzeit)
plt.imshow(champ_img, cmap='gray', interpolation='nearest')
plt.show()
| mit |
JeanKossaifi/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
bwohlberg/sporco | setup.py | 1 | 3780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""SPORCO package configuration."""
from __future__ import print_function
from builtins import next
from builtins import filter
import os
from glob import glob
from setuptools import setup
import os.path
from ast import parse
name = 'sporco'
# Get version number from sporco/__init__.py
# See http://stackoverflow.com/questions/2058802
with open(os.path.join(name, '__init__.py')) as f:
version = parse(next(filter(
lambda line: line.startswith('__version__'),
f))).body[0].value.s
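# e.g. for a (hypothetical) line __version__ = '0.1.9' in sporco/__init__.py,
# parse() wraps it in a Module node and body[0].value.s extracts the string
# '0.1.9' without importing the package.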
packages = ['sporco', 'sporco.prox', 'sporco.admm', 'sporco.pgm',
'sporco.dictlrn']
docdirbase = 'share/doc/%s-%s' % (name, version)
data = [(os.path.join(docdirbase, 'examples/scripts'),
['examples/scripts/index.rst'])]
for d in glob('examples/scripts/*'):
if os.path.isdir(d):
data.append((os.path.join(docdirbase, d),
[os.path.join(d, 'index.rst')] +
glob(os.path.join(d, '*.py'))))
longdesc = \
"""
SPORCO is a Python package for solving optimisation problems with
sparsity-inducing regularisation. These consist primarily of sparse
coding and dictionary learning problems, including convolutional
sparse coding and dictionary learning, but there is also support for
other problems such as Total Variation regularisation and Robust
PCA. The optimisation algorithms in the current version are based
on the Alternating Direction Method of Multipliers (ADMM) or on
the Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
"""
install_requires = ['future', 'numpy', 'scipy', 'imageio', 'matplotlib']
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
print("Building on ReadTheDocs")
install_requires.append('ipython')
else:
install_requires.append('pyfftw')
tests_require = ['pytest', 'pytest-runner']
setup(
name = name,
version = version,
description = 'Sparse Optimisation Research Code: A Python package ' \
'for sparse coding and dictionary learning',
long_description = longdesc,
keywords = ['Sparse Representations', 'Sparse Coding',
'Dictionary Learning',
'Convolutional Sparse Representations',
'Convolutional Sparse Coding', 'Optimization',
'ADMM', 'PGM'],
platforms = 'Any',
license = 'BSD',
url = 'https://github.com/bwohlberg/sporco',
author = 'Brendt Wohlberg',
author_email = '[email protected]',
packages = packages,
package_data = {'sporco': ['data/*.png', 'data/*.jpg', 'data/*.npz']},
data_files = data,
include_package_data = True,
setup_requires = ['future'],
tests_require = tests_require,
install_requires = install_requires,
extras_require = {
'tests': tests_require,
'docs': ['sphinx >=2.2', 'numpydoc', 'sphinxcontrib-bibtex',
'sphinx_tabs', 'sphinx_fontawesome', 'jonga',
'ipython >=6.3.1', 'jupyter', 'py2jn', 'pypandoc'],
'gpu': ['cupy', 'gputil', 'wurlitzer'],
'optional': ['numexpr', 'mpldatacursor']},
classifiers = [
'License :: OSI Approved :: BSD License',
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe = False
)
| bsd-3-clause |
Bogdacutu/STLinux-Kernel | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
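# e.g. biggest_indices([5, 1, 9, 3], 2) returns [0, 2]: the positions of the
# two largest values, ordered from the smaller to the larger of the chosen pair.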
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
        return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
etkirsch/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_array_equal(xi.data, X_csr[idx].data)
assert_array_equal(xi.indices, X_csr[idx].indices)
assert_array_equal(xi.indptr, X_csr[idx].indptr)
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
jnatkins/incubator-spot | spot-oa/oa/dns/dns_oa.py | 6 | 18012 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import json
import shutil
import sys
import datetime
import csv, math
from tld import get_tld
import api.resources.impala_engine as impala
import api.resources.hdfs_client as HDFSClient
from collections import OrderedDict
from utils import Util
from components.data.data import Data
from components.iana.iana_transform import IanaTransform
from components.nc.network_context import NetworkContext
from multiprocessing import Process
import pandas as pd
import time
class OA(object):
def __init__(self,date,limit=500,logger=None):
self._initialize_members(date,limit,logger)
def _initialize_members(self,date,limit,logger):
# get logger if exists. if not, create new instance.
self._logger = logging.getLogger('OA.DNS') if logger else Util.get_logger('OA.DNS',create_file=False)
# initialize required parameters.
self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
self._date = date
self._table_name = "dns"
self._dns_results = []
self._limit = limit
self._data_path = None
self._ipynb_path = None
self._ingest_summary_path = None
self._dns_scores = []
self._dns_scores_headers = []
self._results_delimiter = '\t'
self._details_limit = 250
# get app configuration.
self._spot_conf = Util.get_spot_conf()
# get scores fields conf
conf_file = "{0}/dns_conf.json".format(self._scrtip_path)
self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)
# initialize data engine
self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '')
def start(self):
####################
start = time.time()
####################
self._clear_previous_executions()
self._create_folder_structure()
self._add_ipynb()
self._get_dns_results()
self._add_tld_column()
self._add_reputation()
self._add_hh_column()
self._add_iana()
self._add_network_context()
self._create_dns_scores()
self._get_oa_details()
self._ingest_summary()
##################
end = time.time()
print(end - start)
##################
def _clear_previous_executions(self):
self._logger.info("Cleaning data from previous executions for the day")
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
table_schema = []
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
table_schema=['suspicious', 'edge', 'dendro', 'threat_dendro', 'threat_investigation', 'storyboard', 'summary' ]
for path in table_schema:
HDFSClient.delete_folder("{0}/{1}/hive/oa/{2}/y={3}/m={4}/d={5}".format(HUSER,self._table_name,path,yr,int(mn),int(dy)),user="impala")
impala.execute_query("invalidate metadata")
#removes Feedback file
HDFSClient.delete_folder("{0}/{1}/scored_results/{2}{3}{4}/feedback/ml_feedback.csv".format(HUSER,self._table_name,yr,mn,dy))
#removes json files from the storyboard
HDFSClient.delete_folder("{0}/{1}/oa/{2}/{3}/{4}/{5}".format(HUSER,self._table_name,"storyboard",yr,mn,dy))
def _create_folder_structure(self):
# create date folder structure if it does not exist.
self._logger.info("Creating folder structure for OA (data and ipynb)")
self._data_path,self._ingest_summary_path,self._ipynb_path = Util.create_oa_folders("dns",self._date)
def _add_ipynb(self):
if os.path.isdir(self._ipynb_path):
self._logger.info("Adding advanced mode IPython Notebook")
shutil.copy("{0}/ipynb_templates/Advanced_Mode_master.ipynb".format(self._scrtip_path),"{0}/Advanced_Mode.ipynb".format(self._ipynb_path))
self._logger.info("Adding threat investigation IPython Notebook")
shutil.copy("{0}/ipynb_templates/Threat_Investigation_master.ipynb".format(self._scrtip_path),"{0}/Threat_Investigation.ipynb".format(self._ipynb_path))
else:
self._logger.error("There was a problem adding the IPython Notebooks, please check the directory exists.")
def _get_dns_results(self):
self._logger.info("Getting {0} Machine Learning Results from HDFS".format(self._date))
dns_results = "{0}/dns_results.csv".format(self._data_path)
# get hdfs path from conf file.
HUSER = self._spot_conf.get('conf', 'HUSER').replace("'", "").replace('"', '')
hdfs_path = "{0}/dns/scored_results/{1}/scores/dns_results.csv".format(HUSER,self._date)
# get results file from hdfs.
get_command = Util.get_ml_results_form_hdfs(hdfs_path,self._data_path)
self._logger.info("{0}".format(get_command))
if os.path.isfile(dns_results):
# read number of results based in the limit specified.
self._logger.info("Reading {0} dns results file: {1}".format(self._date,dns_results))
self._dns_results = Util.read_results(dns_results,self._limit,self._results_delimiter)[:]
        if len(self._dns_results) == 0: self._logger.error("There are no dns results.");sys.exit(1)
else:
self._logger.error("There was an error getting ML results from HDFS")
sys.exit(1)
# add dns content.
self._dns_scores = [ conn[:] for conn in self._dns_results][:]
def _move_time_stamp(self,dns_data):
# return dns_data_ordered
return dns_data
def _create_dns_scores(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
value_string = ""
dns_scores_final = self._move_time_stamp(self._dns_scores)
self._dns_scores = dns_scores_final
for row in dns_scores_final:
value_string += str(tuple(Util.cast_val(item) for item in row)) + ","
load_into_impala = ("""
INSERT INTO {0}.dns_scores partition(y={2}, m={3}, d={4}) VALUES {1}
""").format(self._db, value_string[:-1], yr, mn, dy)
impala.execute_query(load_into_impala)
def _add_tld_column(self):
qry_name_col = self._conf['dns_results_fields']['dns_qry_name']
self._dns_scores = [conn + [ get_tld("http://" + str(conn[qry_name_col]), fail_silently=True) if "http://" not in str(conn[qry_name_col]) else get_tld(str(conn[qry_name_col]), fail_silently=True)] for conn in self._dns_scores ]
def _add_reputation(self):
# read configuration.
reputation_conf_file = "{0}/components/reputation/reputation_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self._logger.info("Reading reputation configuration file: {0}".format(reputation_conf_file))
rep_conf = json.loads(open(reputation_conf_file).read())
# initialize reputation services.
self._rep_services = []
self._logger.info("Initializing reputation services.")
for service in rep_conf:
config = rep_conf[service]
module = __import__("components.reputation.{0}.{0}".format(service), fromlist=['Reputation'])
self._rep_services.append(module.Reputation(config,self._logger))
# get columns for reputation.
rep_cols = {}
indexes = [ int(value) for key, value in self._conf["add_reputation"].items()]
self._logger.info("Getting columns to add reputation based on config file: dns_conf.json".format())
for index in indexes:
col_list = []
for conn in self._dns_scores:
col_list.append(conn[index])
rep_cols[index] = list(set(col_list))
# get reputation per column.
self._logger.info("Getting reputation for each service in config")
rep_services_results = []
if self._rep_services :
for key,value in rep_cols.items():
rep_services_results = [ rep_service.check(None,value) for rep_service in self._rep_services]
rep_results = {}
for result in rep_services_results:
rep_results = {k: "{0}::{1}".format(rep_results.get(k, ""), result.get(k, "")).strip('::') for k in set(rep_results) | set(result)}
if rep_results:
self._dns_scores = [ conn + [ rep_results[conn[key]] ] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [""] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [""] for conn in self._dns_scores ]
def _add_hh_column(self):
# add hh value column.
dns_date_index = self._conf["dns_results_fields"]["frame_time"]
self._dns_scores = [conn + [ filter(None,conn[dns_date_index].split(" "))[3].split(":")[0]] for conn in self._dns_scores ]
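        # e.g. a frame_time such as 'Jan  1 2017 00:05:13.000000000' yields
        # hh == '00' (hour field of the fourth whitespace-separated token);
        # the exact timestamp format here is an assumption about the ingest data.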
def _add_iana(self):
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
dns_iana = IanaTransform(iana_config["IANA"])
dns_qry_class_index = self._conf["dns_results_fields"]["dns_qry_class"]
dns_qry_type_index = self._conf["dns_results_fields"]["dns_qry_type"]
dns_qry_rcode_index = self._conf["dns_results_fields"]["dns_qry_rcode"]
self._dns_scores = [ conn + [ dns_iana.get_name(conn[dns_qry_class_index],"dns_qry_class")] + [dns_iana.get_name(conn[dns_qry_type_index],"dns_qry_type")] + [dns_iana.get_name(conn[dns_qry_rcode_index],"dns_qry_rcode")] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + ["","",""] for conn in self._dns_scores ]
def _add_network_context(self):
nc_conf_file = "{0}/components/nc/nc_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(nc_conf_file):
nc_conf = json.loads(open(nc_conf_file).read())["NC"]
dns_nc = NetworkContext(nc_conf,self._logger)
ip_dst_index = self._conf["dns_results_fields"]["ip_dst"]
self._dns_scores = [ conn + [dns_nc.get_nc(conn[ip_dst_index])] for conn in self._dns_scores ]
else:
self._dns_scores = [ conn + [0] for conn in self._dns_scores ]
def _get_oa_details(self):
self._logger.info("Getting OA DNS suspicious details/dendro diagram")
# start suspicious connects details process.
p_sp = Process(target=self._get_suspicious_details)
p_sp.start()
# start chord diagram process.
p_dn = Process(target=self._get_dns_dendrogram)
p_dn.start()
p_sp.join()
p_dn.join()
def _get_suspicious_details(self):
iana_conf_file = "{0}/components/iana/iana_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(iana_conf_file):
iana_config = json.loads(open(iana_conf_file).read())
dns_iana = IanaTransform(iana_config["IANA"])
for conn in self._dns_scores:
timestamp = conn[self._conf["dns_score_fields"]["unix_tstamp"]]
full_date = datetime.datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
date = full_date.split(" ")[0].split("-")
# get date parameters.
yr = date[0]
mn = date[1]
dy = date[2]
time = full_date.split(" ")[1].split(":")
hh = int(time[0])
dns_qry_name = conn[self._conf["dns_score_fields"]["dns_qry_name"]]
self._get_dns_details(dns_qry_name,yr,mn,dy,hh,dns_iana)
def _get_dns_details(self,dns_qry_name,year,month,day,hh,dns_iana):
value_string = ""
query_to_load =("""
SELECT unix_tstamp,frame_len,ip_dst,ip_src,dns_qry_name,dns_qry_class,dns_qry_type,dns_qry_rcode,dns_a,h as hh
FROM {0}.{1} WHERE y={2} AND m={3} AND d={4} AND dns_qry_name LIKE '%{5}%' AND h={6} LIMIT {7};
""").format(self._db,self._table_name,year,month,day,dns_qry_name,hh,self._details_limit)
try:
dns_details = impala.execute_query(query_to_load)
except:
            self._logger.info("WARNING. Details couldn't be retrieved for {0}, skipping this step".format(dns_qry_name))
else:
# add IANA to results.
update_rows = []
if dns_iana:
self._logger.info("Adding IANA translation to details results")
dns_details = [ conn + (dns_iana.get_name(str(conn[5]),"dns_qry_class"),dns_iana.get_name(str(conn[6]),"dns_qry_type"),dns_iana.get_name(str(conn[7]),"dns_qry_rcode")) for conn in dns_details ]
else:
self._logger.info("WARNING: NO IANA configured.")
dns_details = [ conn + ("","","") for conn in dns_details ]
nc_conf_file = "{0}/components/nc/nc_config.json".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if os.path.isfile(nc_conf_file):
nc_conf = json.loads(open(nc_conf_file).read())["NC"]
dns_nc = NetworkContext(nc_conf,self._logger)
dns_details = [ conn + (dns_nc.get_nc(conn[2]),) for conn in dns_details ]
else:
dns_details = [ conn + (0,) for conn in dns_details ]
for row in dns_details:
value_string += str(tuple(item for item in row)) + ","
if value_string != "":
query_to_insert=("""
INSERT INTO {0}.dns_edge PARTITION (y={1}, m={2}, d={3}) VALUES ({4});
""").format(self._db,year, month, day, value_string[:-1])
impala.execute_query(query_to_insert)
def _get_dns_dendrogram(self):
for conn in self._dns_scores:
timestamp = conn[self._conf["dns_score_fields"]["unix_tstamp"]]
full_date = datetime.datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
date = full_date.split(" ")[0].split("-")
# get date parameters.
yr = date[0]
mn = date[1]
dy = date[2]
ip_dst=conn[self._conf["dns_score_fields"]["ip_dst"]]
query_to_load = ("""
INSERT INTO TABLE {0}.dns_dendro PARTITION (y={2}, m={3},d={4})
SELECT unix_tstamp, dns_a, dns_qry_name, ip_dst
FROM (SELECT unix_tstamp, susp.ip_dst, susp.dns_qry_name, susp.dns_a
FROM {0}.{1} as susp WHERE susp.y={2} AND susp.m={3} AND susp.d={4} AND susp.ip_dst='{5}'
LIMIT {6}) AS tmp GROUP BY dns_a, dns_qry_name, ip_dst, unix_tstamp
""").format(self._db,self._table_name,yr,mn,dy,ip_dst,self._details_limit)
impala.execute_query(query_to_load)
def _ingest_summary(self):
# get date parameters.
yr = self._date[:4]
mn = self._date[4:6]
dy = self._date[6:]
self._logger.info("Getting ingest summary data for the day")
ingest_summary_cols = ["date","total"]
result_rows = []
df_filtered = pd.DataFrame()
query_to_load = ("""
SELECT frame_time, COUNT(*) as total FROM {0}.{1}
WHERE y={2} AND m={3} AND d={4} AND unix_tstamp IS NOT NULL
AND frame_time IS NOT NULL AND frame_len IS NOT NULL
AND dns_qry_name IS NOT NULL AND ip_src IS NOT NULL
AND (dns_qry_class IS NOT NULL AND dns_qry_type IS NOT NULL
AND dns_qry_rcode IS NOT NULL ) GROUP BY frame_time;
""").format(self._db,self._table_name, yr, mn, dy)
results = impala.execute_query_as_list(query_to_load)
df = pd.DataFrame(results)
# Forms a new dataframe splitting the minutes from the time column
df_new = pd.DataFrame([["{0}-{1}-{2} {3}:{4}".format(yr, mn, dy,\
val['frame_time'].replace(" "," ").split(" ")[3].split(":")[0].zfill(2),\
val['frame_time'].replace(" "," ").split(" ")[3].split(":")[1].zfill(2)),\
int(val['total']) if not math.isnan(val['total']) else 0 ] for key,val in df.iterrows()],columns = ingest_summary_cols)
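        # e.g. with yr/mn/dy = 2017/01/01 and a frame_time whose time-of-day
        # token is '00:05:13', the row lands in the minute bucket
        # '2017-01-01 00:05' (illustrative values only).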
#Groups the data by minute
sf = df_new.groupby(by=['date'])['total'].sum()
df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
df_final = df_filtered.append(df_per_min, ignore_index=True).to_records(False,False)
if len(df_final) > 0:
query_to_insert=("""
INSERT INTO {0}.dns_ingest_summary PARTITION (y={1}, m={2}, d={3}) VALUES {4};
""").format(self._db, yr, mn, dy, tuple(df_final))
impala.execute_query(query_to_insert) | apache-2.0 |
LennonLab/Micro-Encounter | fig-scripts/TimeSeries/TimeSeriesHulls-Encounters.py | 1 | 8477 | from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from random import randint
import os
import sys
import statsmodels.api as sm
from math import isnan, isinf
def randcolor():
c1 = randint(0,255)
c2 = randint(0,255)
c3 = randint(0,255)
clr = '#%02x%02x%02x' % (c1, c2, c3)
return clr
def plot_dat(fig, mdat, ylab, clr, clim = 65):
simlist = list(set(mdat['sim'].tolist()))
print len(simlist)
X = []
Y = []
for i, sim in enumerate(simlist):
d = mdat[mdat['sim'] == sim]
x = d['ct'].tolist()
if ylab == '%Dormant':
y = d[ylab].tolist()
else:
y = np.log10(d[ylab]).tolist()
for ii, val in enumerate(y):
            if not isnan(val) and not isinf(val):
X.append(x[ii])
Y.append(val)
xran = np.arange(1, 2000, 1).tolist()
binned = np.digitize(X, xran).tolist()
bins = [list([]) for _ in xrange(2000)]
for i, val in enumerate(binned):
bins[val-1].append(Y[i])
pct5 = []
pct95 = []
xran = []
for i, _bin in enumerate(bins):
if len(_bin) > 0:
pct5.append(np.percentile(_bin, 100 - clim))
pct95.append(np.percentile(_bin, clim))
xran.append(i+1)
if clr == '0.3':
plt.fill_between(xran, pct5, pct95, facecolor= clr, alpha=0.82, lw=0.2)
else:
plt.fill_between(xran, pct5, pct95, facecolor= clr, alpha=0.5, lw=0.2)
return fig
mydir = os.path.expanduser('~/GitHub/Micro-Encounter')
sys.path.append(mydir+'/tools')
mydir2 = os.path.expanduser("~/")
df = pd.read_csv(mydir + '/results/simulated_data/2016_09_21_SimData-TSA.csv')
#-------------------------DATA TRANSFORMATIONS -----------------------
df['%Dormant'] = 100*df['DormantN']/df['N']
#### plot figure ###############################################################
fig = plt.figure()
fs = 8 # fontsize
xlimit = 2000
xlab = 'Generations'
#### PLOT 1 #################################################################
dat = pd.DataFrame(df)
dat = dat[dat['SpatialComplexity'].str.contains('-chemotaxis-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
fig.add_subplot(3, 3, 1)
plt.plot([0, 1], [-10, -10], c = '0.3', ls = '-', alpha = 0.5, lw=10, label='Lock&Key')
plt.plot([0, 1], [-10, -10], c = 'm', ls = '-', alpha = 0.5, lw=10, label='Polyculture')
plt.plot([0, 1], [-10, -10], c = 'c', ls = '-', alpha = 0.5, lw=10, label='Monoculture')
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(300, -1.3, 'Chemotaxis', fontsize=fs+6)
plt.legend(bbox_to_anchor=(-0.05, 1.14, 3.9, .2), loc=10, ncol=3, mode="expand",prop={'size':fs+3})
#### PLOT 2 #################################################################
fig.add_subplot(3, 3, 2)
dat = pd.DataFrame(df)
dat = dat[dat['SpatialComplexity'].str.contains('-randwalk-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(100, -1.3, 'Run and tumble', fontsize=fs+6)
#### PLOT 3 #################################################################
fig.add_subplot(3, 3, 3)
dat = pd.DataFrame(df)
dat = dat[dat['SpatialComplexity'].str.contains('-none-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(500, -1.3, 'Passive', fontsize=fs+6)
#### PLOT 4 #################################################################
dat = pd.DataFrame(df)
dat = dat[dat['TrophicComplexity'].str.contains('-none-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
fig.add_subplot(3, 3, 4)
plt.plot([0, 1], [-10, -10], c = '0.3', ls = '-', alpha = 0.5, lw=10, label='Lock&Key')
plt.plot([0, 1], [-10, -10], c = 'm', ls = '-', alpha = 0.5, lw=10, label='Polyculture')
plt.plot([0, 1], [-10, -10], c = 'c', ls = '-', alpha = 0.5, lw=10, label='Monoculture')
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(600, -1.3, 'Simple', fontsize=fs+6)
#plt.legend(bbox_to_anchor=(-0.05, 1.14, 3.9, .2), loc=10, ncol=3, mode="expand",prop={'size':fs+3})
#### PLOT 5 #################################################################
fig.add_subplot(3, 3, 5)
dat = pd.DataFrame(df)
dat = dat[dat['TrophicComplexity'].str.contains('-crossfeeding-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(250, -1.3, 'Crossfeeding', fontsize=fs+6)
#### PLOT 6 #################################################################
fig.add_subplot(3, 3, 6)
dat = pd.DataFrame(df)
dat = dat[dat['TrophicComplexity'].str.contains('-scavenging-')]
dat1 = dat[dat['ResourceComplexity'].str.contains('-lockandkey-')]
dat2 = dat[dat['ResourceComplexity'].str.contains('-polyculture-')]
dat2 = dat2[dat2['ResourceComplexity'].str.contains('-simple-')]
dat3 = dat[dat['ResourceComplexity'].str.contains('-monoculture-')]
dat3 = dat3[dat3['ResourceComplexity'].str.contains('-simple-')]
ylab = '$log$'+r'$_{10}$'+'($Encounters$)'
datylab = 'Encounters'
fig = plot_dat(fig, dat3, datylab, 'c')
fig = plot_dat(fig, dat2, datylab, 'm')
fig = plot_dat(fig, dat1, datylab, '0.3')
plt.ylabel(ylab, fontsize=fs+1)
plt.xlabel(xlab, fontsize=fs+1)
plt.ylim(-0.1, 2.1)
plt.tick_params(axis='both', which='major', labelsize=fs-1)
plt.text(400, -1.3, 'Scavenging', fontsize=fs+6)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.8)
plt.savefig(mydir + '/results/figures/TimeSeries-ResourceEffectsOnEncounters.png', dpi=600, bbox_inches = "tight")
plt.close()
| gpl-3.0 |
robin-lai/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
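# --- Illustrative note, not part of the original example ---------------------
# Passing sample_weight to fit() rescales the per-sample penalty: sample i is
# effectively penalised with C_i = C * w_i, which is why the up-weighted
# outliers pull the decision boundary. A tiny sketch of that bookkeeping
# (the helper name is ours, not scikit-learn API):
def effective_per_sample_C(C, sample_weight):
    """Per-sample regularisation strengths implied by the sample weights."""
    return C * np.asarray(sample_weight, dtype=float)

# e.g. effective_per_sample_C(1.0, sample_weight_last_ten) shows the larger
# penalties attached to the points drawn bigger in the right-hand panel.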
| bsd-3-clause |
ishank08/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
Kaaos/kaaosgit | Sounding_sweeper.py | 1 | 7022 | # -*- coding: utf-8 -*-
# Removes too shallow soundings from the point data
# Fairway areas must be mechanically swept to make sure not hard targets are removed from the data!
# Other restrictions for possible use cases exist, use good judgement!
# Experimental, use at your own risk
# Load packages (install first):
from shapely.geometry import Point
import geopandas as gpd
import Tkinter as Tk # File IO-dialog
from tkFileDialog import askopenfilename # File IO-dialog
# Function for fairway areas shapefile input:
def input_fairwayareas_path():
Tk.Tk().withdraw() # Keep the root window from appearing
filetype = [('ESRI Shapefile', '.shp')]
info = 'Select fairway areas shapefile:'
try:
filepath = askopenfilename(filetypes=filetype, initialdir='C:\Users', title=info) # show an 'Open' dialog box and return the path to the selected file
except Exception:
filepath = None
return filepath
# Function for points file input:
def input_points():
Tk.Tk().withdraw()
filetype = [('Ascii XYZ', '.xyz')]
info = 'Select XYZ points file:'
try:
filepath = askopenfilename(filetypes=filetype, initialdir='C:\Users', title=info) # show an 'Open' dialog box and return the path to the selected file
except Exception:
filepath = None
return filepath
# Function for output (point-) file paths parsing:
def parse_outputfilepaths(input_path):
corrected_points_path = input_path.split(".")[0] + "_corrected.xyz"
tracking_list_path = input_path.split(".")[0] + "_tracking_list.xyz"
return corrected_points_path, tracking_list_path
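# Illustrative example (not part of the original script): for a hypothetical
# input of "C:/data/survey.xyz" the function above returns
# ("C:/data/survey_corrected.xyz", "C:/data/survey_tracking_list.xyz").
# Note that split(".")[0] assumes the only dot in the path is the extension
# separator; paths with dots in directory names would be truncated.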
# Function for EPSG CRS code input:
def epsg_input():
print 'Enter EPSG code for point data CRS:'
while True:
epsg = raw_input('')
if (epsg == ''):
print 'No EPSG code defined. Exiting.'
exit()
try:
epsg = int(epsg)
break
except Exception:
print "Illegal argument. Enter an (integer type) EPSG code."
return epsg
# Function to read in fairwayareas:
def read_fairways(path):
try:
fairway = gpd.read_file(path) # NOTICE: CRS must match ASCII points CRS for intersection to work properly!
deepsweep = max(fairway["SDEPFWYARE"]) # Get deepest swept depth
        epsg = fairway.crs  # Get dataset CRS (a CRS dict/object, not necessarily a bare EPSG code)
except Exception:
print "Error reading fairway shapefile. Exiting."
exit()
return fairway, deepsweep, epsg
# Function to reproject vector data to given CRS:
def reproject_data(data, point_epsg):
    try:
        print "\nReprojecting data to: ", point_epsg
        data = data.to_crs(epsg=point_epsg)  # to_crs returns a new reprojected GeoDataFrame
        print "Reprojection OK."
    except Exception:
        print "Error: Reprojection failed."
        exit()
    return data  # hand the reprojected copy back to the caller
# Function to erase new files that include errors:
def deleteContent(cleanfile):
cleanfile.seek(0)
cleanfile.truncate()
# Function to check the correction outcome and erase files and exit if errors were found
def check_correction(loop, corrected, tracklist, points):
if (loop is False): # Was looping not clean, came up with errors?
print "Something went wrong. Emptying new files and exiting."
deleteContent(corrected)
deleteContent(tracklist)
exit()
# Point iterator function:
def iterate_points(points, fairwayareas, deepest_sweep, corrected_out, tracklist_out):
try:
print "\nIterating over points file.. (please be patient as this might take a while)"
for p in points:
point_coordinates = p.split(" ")
if (abs(float(point_coordinates[2])) >= deepest_sweep):
corrected_out.write(p) # Point depth >= deepest sweep, can be written right away to make processing faster
else:
writerFunction(fairwayareas, p, point_coordinates, corrected_out, tracklist_out) # Point directed to further processing
return True
except Exception:
return False
# Function to check, correct and write points:
def writerFunction(fairway, p, point_coordinates, corrected_out, tracklist_out):
try:
var_point = Point(float(point_coordinates[0]), float(point_coordinates[1]))
var_depth = abs(float(point_coordinates[2]))
for i in range(len(fairway)): # Loop trough fairway areas
sweep_depth = fairway.loc[i]["SDEPFWYARE"]
sweep_geometry = fairway.loc[i]["geometry"]
if (sweep_geometry.intersects(var_point)): # Point on fairway area?
if (var_depth < sweep_depth): # Depth shallower than swept depth?
tracklist_out.write(p) # Original points are stored on a tracking list
point_coordinates[2] = str(0.0 - sweep_depth) # Set point depth to swept depth
row = point_coordinates[0] + " " + point_coordinates[1] + " " + point_coordinates[2] + "\n" # Define row (single point in XYZ)
corrected_out.write(row) # Write corrected point
print("Conflicting point detected: Point Z = " + str(var_depth) + ", swept depth = " + str(sweep_depth))
return # Point written, return
else:
corrected_out.write(p) # Point OK --> write
return # Point written, return
corrected_out.write(p) # Point not on fairways --> OK --> write
return # Point written, return
except Exception: # Error protection
print "Error: error writing points to a file. Exiting."
exit()
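# --- Illustrative sketch, not part of the original script --------------------
# The correction rule applied by writerFunction() reduces to: inside a swept
# fairway polygon a sounding may never be shallower than the swept depth.
# The helper below states that rule on plain depth magnitudes (positive
# numbers, as produced by abs() in the loop above); it is never called here.
def clamp_depth_to_sweep(depth, sweep_depth):
    if depth < sweep_depth:     # shallower than the guaranteed (swept) depth
        return sweep_depth      # conflicting sounding -> replace with swept depth
    return depth                # already deep enough -> keep unchanged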
# # # # # # # #
# Main method #
# # # # # # # #
# Get points file, parse output filepaths:
points_fp = input_points() # Get original points filepath
corrected_fp, tracklist_fp = parse_outputfilepaths(points_fp) # Parse output filepaths
point_crs = epsg_input() # Get point data EPSG
# Read in fairways in ESRI shapefile format:
fairway_fp = input_fairwayareas_path() # Get path
fairways, deepest_sweep, fairway_epsg = read_fairways(fairway_fp) # Get file pointer, deepest swepth depth and EPSG code
# Reproject fairway areas to match points CRS:
fairways = reproject_data(fairways, point_crs)  # capture the reprojected fairways
# Open files (also takes care of closing the files in all cases):
with open(points_fp, "r") as points, \
open(corrected_fp, "a") as corrected_out, \
open(tracklist_fp, "a") as tracklist_out:
# Correct the points, write output files and check if everything went ok:
correction_successful = iterate_points(points, fairways, deepest_sweep, corrected_out, tracklist_out)
check_correction(correction_successful, corrected_out, tracklist_out, points) # If errors were found, output files will be empty
# In case of success:
print "\nSweeping successful! Check new files: "
print corrected_fp
print tracklist_fp, "\n"
| mit |
antoinecarme/pyaf | tests/lgbm/test_air_passengers_lgb_only.py | 1 | 1537 | import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
import logging
import logging.config
#logging.config.fileConfig('logging.conf')
logging.basicConfig(level=logging.INFO)
#get_ipython().magic('matplotlib inline')
b1 = tsds.load_airline_passengers()
df = b1.mPastData
df.head()
lEngine = autof.cForecastEngine()
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
lEngine.mOptions.mDebugPerformance = True;
lEngine.mOptions.mParallelMode = True;
lEngine.mOptions.set_active_autoregressions(['LGB']);
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
print(lEngine.mSignalDecomposition.mTrPerfDetails.head());
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
lEngine.standardPlots(name = "outputs/my_airline_passengers_lgb_only")
dfapp_in = df.copy();
dfapp_in.tail()
#H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
lForecastColumnName = b1.mSignalVar + '_Forecast'
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, lForecastColumnName , lForecastColumnName + '_Lower_Bound', lForecastColumnName + '_Upper_Bound' ]]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(2*H));
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(2*H).to_json(date_format='iso'))
print("</Forecast>\n\n")
# lEngine.standardPlots(name = "outputs/airline_passengers")
| bsd-3-clause |
jmmease/pandas | pandas/core/generic.py | 1 | 261745 | # pylint: disable=W0231,E1101
import collections
import warnings
import operator
import weakref
import gc
import json
import numpy as np
import pandas as pd
from pandas._libs import tslib, lib, properties
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
is_scalar,
is_number,
is_integer, is_bool,
is_bool_dtype,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_list_like,
is_dict_like,
is_re_compilable,
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
from pandas.core.common import (_values_from_object,
_maybe_box_datetimelike,
SettingWithCopyError, SettingWithCopyWarning,
AbstractMethodError)
from pandas.core.base import PandasObject, SelectionMixin
from pandas.core.index import (Index, MultiIndex, _ensure_index,
InvalidIndexError)
import pandas.core.indexing as indexing
from pandas.core.indexing import maybe_convert_indices
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex, Period
from pandas.core.internals import BlockManager
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
from pandas.io.formats.format import format_percentiles, DataFrameFormatter
from pandas.tseries.frequencies import to_offset
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import (map, zip, lzip, lrange, string_types,
isidentifier, set_function_name, cPickle as pkl)
from pandas.core.ops import _align_method_FRAME
import pandas.core.nanops as nanops
from pandas.util._decorators import (Appender, Substitution,
deprecate_kwarg)
from pandas.util._validators import validate_bool_kwarg
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
_shared_doc_kwargs = dict(
axes='keywords for axes', klass='NDFrame',
axes_single_arg='int or labels for object',
args_transpose='axes to permute (int or label for object)',
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""")
def _single_replace(self, to_replace, method, inplace, limit):
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
class NDFrame(PandasObject, SelectionMixin):
"""
    N-dimensional analogue of DataFrame. Stores multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache', 'is_copy',
'_subtyp', '_name', '_index', '_default_kind',
'_default_fill_value', '_metadata', '__array_struct__',
'__array_interface__']
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_deprecations = frozenset(['as_blocks', 'blocks',
'consolidate', 'convert_objects'])
_metadata = []
is_copy = None
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, 'is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
def __unicode__(self):
# unicode representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = '[%s]' % ','.join(map(pprint_thing, self))
return '%s(%s)' % (self.__class__.__name__, prepr)
def _dir_additions(self):
""" add the string-like attributes from the info_axis """
additions = set([c for c in self._info_axis
if isinstance(c, string_types) and isidentifier(c)])
return super(NDFrame, self)._dir_additions().union(additions)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame() and DataFrame.to_panel()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = dict((a, i) for i, a in enumerate(axes))
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = dict((v, k) for k, v in cls._AXIS_ALIASES.items())
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
# addtl parms
if isinstance(ns, dict):
for k, v in ns.items():
setattr(cls, k, v)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)])
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a))
for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
def _construct_axes_from_arguments(self, args, kwargs, require_all=False):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError("arguments are mutually exclusive "
"for [%s,%s]" % (a, alias))
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments "
"specified!")
axes = dict([(a, kwargs.pop(a, None)) for a in self._AXIS_ORDERS])
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
def _get_axis_number(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in self._AXIS_NAMES:
return axis
else:
try:
return self._AXIS_NUMBERS[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis_name(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, string_types):
if axis in self._AXIS_NUMBERS:
return axis
else:
try:
return self._AXIS_NAMES[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def _get_block_manager_axis(self, axis):
"""Map the axis to the block_manager axis."""
axis = self._get_axis_number(axis)
if self._AXIS_REVERSED:
m = self._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiiindex
key = '{prefix}level_{i}'.format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""Return a tuple of axis dimensions"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""Return index label(s) of the internal NDFrame"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""Number of axes / array dimensions"""
return self._data.ndim
@property
def size(self):
"""number of elements in the NDFrame"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
_shared_docs['set_axis'] = """Assign desired index to given axis
Parameters
----------
labels: list-like or Index
The values for the new index
axis : int or string, default 0
inplace : boolean, default None
Whether to return a new %(klass)s instance.
        WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.21.0
        The signature is made consistent with the rest of the API.
Previously, the "axis" and "labels" arguments were respectively
the first and second positional arguments.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
pandas.NDFrame.rename
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
a 1
b 2
c 3
dtype: int64
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
A B
a 1 4
b 2 5
c 3 6
>>> df.set_axis(['I', 'II'], axis=1, inplace=False)
I II
0 1 4
1 2 5
2 3 6
>>> df.set_axis(['i', 'ii'], axis=1, inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
@Appender(_shared_docs['set_axis'] % dict(klass='NDFrame'))
def set_axis(self, labels, axis=0, inplace=None):
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
_shared_docs['transpose'] = """
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
Returns
-------
y : same as input
"""
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs,
require_all=True)
axes_names = tuple([self._get_axis_name(axes[a])
for a in self._AXIS_ORDERS])
axes_numbers = tuple([self._get_axis_number(axes[a])
for a in self._AXIS_ORDERS])
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)
for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if kwargs.pop('copy', None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
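    # Illustrative usage, not part of the original pandas source: for a
    # two-dimensional frame, swapping axes 0 and 1 amounts to a transpose.
    #
    #   >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
    #   >>> df.swapaxes(0, 1).equals(df.T)
    #   True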
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Column label to be popped
Returns
-------
popped : Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze length 1 dimensions.
Parameters
----------
axis : None, integer or string axis name, optional
The axis to squeeze if 1-sized.
.. versionadded:: 0.20.0
Returns
-------
scalar if 1-sized, else original object
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple([0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)])]
except:
return self
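    # Illustrative usage, not part of the original pandas source:
    #
    #   >>> pd.DataFrame([[1]]).squeeze()          # 1x1 frame -> scalar
    #   1
    #   >>> pd.DataFrame({'a': [1, 2]}).squeeze()  # single column -> Series
    #   0    1
    #   1    2
    #   Name: a, dtype: int64
    #   >>> pd.Series([1, 2, 3]).squeeze()         # nothing 1-sized -> unchanged
    #   0    1
    #   1    2
    #   2    3
    #   dtype: int64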
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
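    # Illustrative usage, not part of the original pandas source:
    #
    #   >>> mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
    #   ...                                names=['outer', 'inner'])
    #   >>> s = pd.Series([10, 20], index=mi)
    #   >>> s.swaplevel().index.names
    #   FrozenList(['inner', 'outer'])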
# ----------------------------------------------------------------------
# Rename
# TODO: define separate funcs for DataFrame, Series and Panel so you can
# get completion on keyword arguments.
_shared_docs['rename'] = """
    Alter axes using input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(optional_mapper)s
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame or Panel.
dict-like or functions are transformations to apply to
that axis' values
%(optional_axis)s
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : %(klass)s (new object)
See Also
--------
pandas.NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
    * ``(index=index_mapper, columns=columns_mapper, ...)``
    * ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
@Appender(_shared_docs['rename'] % dict(axes='axes keywords for this'
' object', klass='NDFrame',
optional_mapper='',
optional_axis=''))
def rename(self, *args, **kwargs):
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
if kwargs:
raise TypeError('rename() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if com._count_not_none(*axes.values()) == 0:
raise TypeError('must pass an index to rename')
# renamer function if passed a dict
def _get_rename_function(mapper):
if isinstance(mapper, (dict, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = _get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
result._data = result._data.rename_axis(f, axis=baxis, copy=copy,
level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
rename.__doc__ = _shared_docs['rename']
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
"""
Alter index and / or columns using input function or functions.
A scalar or list-like for ``mapper`` will alter the ``Index.name``
or ``MultiIndex.names`` attribute.
A function or dict for ``mapper`` will alter the labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is.
Parameters
----------
mapper : scalar, list-like, dict-like or function, optional
axis : int or string, default 0
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Returns
-------
renamed : type of caller or None if inplace=True
See Also
--------
pandas.NDFrame.rename
pandas.Index.rename
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename_axis("foo") # scalar, alters df.index.name
A B
foo
0 1 4
1 2 5
2 3 6
>>> df.rename_axis(lambda x: 2 * x) # function: alters labels
A B
0 1 4
2 2 5
4 3 6
>>> df.rename_axis({"A": "ehh", "C": "see"}, axis="columns") # mapping
ehh B
0 1 4
1 2 5
2 3 6
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Alter the name or names of the axis.
Parameters
----------
name : str or list of str
Name for the Index, or list of names for the MultiIndex
axis : int or str
0 or 'index' for the index; 1 or 'columns' for the columns
inplace : bool
whether to modify `self` directly or return a copy
.. versionadded: 0.21.0
Returns
-------
renamed : type of caller or None if inplace=True
See Also
--------
pandas.DataFrame.rename
pandas.Series.rename
pandas.Index.rename
Examples
--------
>>> df._set_axis_name("foo")
A
foo
0 1
1 2
2 3
>>> df.index = pd.MultiIndex.from_product([['A'], ['a', 'b', 'c']])
>>> df._set_axis_name(["bar", "baz"])
A
bar baz
A a 1
b 2
c 3
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparisons
def _indexed_same(self, other):
return all([self._get_axis(a).equals(other._get_axis(a))
for a in self._AXIS_ORDERS])
def __neg__(self):
values = _values_from_object(self)
if values.dtype == np.bool_:
arr = operator.inv(values)
else:
arr = operator.neg(values)
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(_values_from_object(self))
return self.__array_wrap__(arr)
except:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def equals(self, other):
"""
Determines if two NDFrame objects contain the same elements. NaNs in
the same location are considered equal.
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
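    # Illustrative usage, not part of the original pandas source: NaNs in
    # matching positions compare equal here, unlike element-wise ``==``.
    #
    #   >>> df1 = pd.DataFrame({'a': [1.0, np.nan]})
    #   >>> df2 = pd.DataFrame({'a': [1.0, np.nan]})
    #   >>> df1.equals(df2)
    #   True
    #   >>> (df1 == df2).all().all()
    #   False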
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
' hashed'.format(self.__class__.__name__))
def __iter__(self):
"""Iterate over infor axis"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel.
"""
return self._info_axis
def iteritems(self):
"""Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
"""
for h in self._info_axis:
yield h, self[h]
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""True if NDFrame is entirely empty [no items], meaning any of the
axes are of length 0.
Notes
-----
If NDFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
See also
--------
pandas.Series.dropna
pandas.DataFrame.dropna
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
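    # Illustrative usage, not part of the original pandas source:
    #
    #   >>> pd.Series([True]).bool()
    #   True
    #   >>> pd.Series([False]).bool()
    #   False
    #   >>> pd.Series([True, False]).bool()    # more than one element
    #   Traceback (most recent call last):
    #       ...
    #   ValueError: The truth value of a Series is ambiguous. ...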
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# ----------------------------------------------------------------------
# Array Interface
def __array__(self, dtype=None):
return _values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""Return dense representation of NDFrame (as opposed to sparse)"""
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = dict((k, getattr(self, k, None)) for k in self._metadata)
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,
**meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get('_typ')
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
# ----------------------------------------------------------------------
# IO
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs['to_excel'] = """
Write %(klass)s to an excel sheet
%(versionadded_to_excel)s
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow :
upper left cell row to dump data frame
startcol :
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
.. versionadded:: 0.20.0
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = pd.ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : the path or buffer to write the result string
if this is None, return the converted string
orient : string
* Series
- default is 'index'
- allowed values are: {'split','records','index'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values'}
* The format of the JSON string
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
- columns : dict like {column -> {index -> value}}
- values : just the values array
- table : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. `epoch` = epoch milliseconds,
`iso` = ISO8601. The default depends on the `orient`. For
`orient='table'`, the default is `'iso'`. For all other orients,
the default is `'epoch'`.
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
force_ascii : force encoded string to be ASCII, default True.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : boolean, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
.. versionadded:: 0.19.0
compression : {None, 'gzip', 'bz2', 'xz'}
A string representing the compression to use in the output file,
only used when the first argument is a filename
.. versionadded:: 0.21.0
Returns
-------
same type as input object with filtered info axis
See Also
--------
pd.read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression)
def to_hdf(self, path_or_buf, key, **kwargs):
"""Write the contained data to an HDF5 file using HDFStore.
Parameters
----------
path_or_buf : the path (string) or HDFStore object
key : string
identifier for the group in the store
mode : optional, {'a', 'w', 'r+'}, default 'a'
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
For Table formats, append the input data to the existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
Applicable only to format='table'.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
dropna : boolean, default False.
If true, ALL nan rows will not be written to store.
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
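# Usage sketch (illustrative only; requires the optional PyTables dependency
# and is not executed here). The store name 'store.h5' and key 'df' are
# arbitrary examples; ``where`` queries need format='table'.
#
#   >>> df.to_hdf('store.h5', key='df', mode='w', format='table')
#   >>> pd.read_hdf('store.h5', 'df')                      # read the group back
#   >>> pd.read_hdf('store.h5', 'df', where='index > 2')   # table format only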
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), defaults to None (no
compression)
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
used.
schema : string, default None
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
from pandas.io import sql
sql.to_sql(self, name, con, flavor=flavor, schema=schema,
if_exists=if_exists, index=index, index_label=index_label,
chunksize=chunksize, dtype=dtype)
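# Usage sketch (illustrative, not executed here). With a DBAPI2 connection
# only sqlite3 is supported; the table name 'my_table' is an arbitrary
# example.
#
#   >>> import sqlite3
#   >>> con = sqlite3.connect(':memory:')
#   >>> df.to_sql('my_table', con, index=False)
#   >>> pd.read_sql('SELECT * FROM my_table', con)
#   >>> df.to_sql('my_table', con, if_exists='append', index=False)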
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to input file path.
Parameters
----------
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', None}, default 'infer'
a string representing the compression to use in the output file
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For
Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a
valid value. For Python >= 3.4, 4 is a valid value. A negative value
for the protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
"""
from pandas.io.pickle import to_pickle
return to_pickle(self, path, compression=compression,
protocol=protocol)
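# Usage sketch (illustrative). With compression='infer' the compression is
# picked from the file extension, so the '.gz' suffix below implies gzip.
#
#   >>> df.to_pickle('frame.pkl.gz')       # gzip inferred from the extension
#   >>> pd.read_pickle('frame.pkl.gz')     # round-trips the object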
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
a DataArray for a Series
a Dataset for a DataFrame
a DataArray for higher dims
Examples
--------
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)})
>>> df
A B C
0 1 foo 4.0
1 1 bar 5.0
2 2 foo 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 3)
Coordinates:
* index (index) int64 0 1 2
Data variables:
A (index) int64 1 1 2
B (index) object 'foo' 'bar' 'foo'
C (index) float64 4.0 5.0 6.0
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)}
).set_index(['B','A'])
>>> df
C
B A
foo 1 4.0
bar 1 5.0
foo 2 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (A: 2, B: 2)
Coordinates:
* B (B) object 'bar' 'foo'
* A (A) int64 1 2
Data variables:
C (B, A) float64 5.0 nan 4.0 6.0
>>> p = pd.Panel(np.arange(24).reshape(4,3,2),
items=list('ABCD'),
major_axis=pd.date_range('20130101', periods=3),
minor_axis=['first', 'second'])
>>> p
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 3 (major_axis) x 2 (minor_axis)
Items axis: A to D
Major_axis axis: 2013-01-01 00:00:00 to 2013-01-03 00:00:00
Minor_axis axis: first to second
>>> p.to_xarray()
<xarray.DataArray (items: 4, major_axis: 3, minor_axis: 2)>
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15],
[16, 17]],
[[18, 19],
[20, 21],
[22, 23]]])
Coordinates:
* items (items) object 'A' 'B' 'C' 'D'
* major_axis (major_axis) datetime64[ns] 2013-01-01 2013-01-02 2013-01-03 # noqa
* minor_axis (minor_axis) object 'first' 'second'
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
_shared_docs['to_latex'] = r"""
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
`to_latex`-specific options:
bold_rows : boolean, default False
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
Default: False.
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
Default: True.
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : boolean, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : boolean, default False
Use \multirow to enhance MultiIndex rows.
Requires adding a \\usepackage{multirow} to your LaTeX preamble.
Will print centered labels (instead of top-aligned)
across the contained rows, separating groups via clines.
The default will be read from the pandas config module.
.. versionadded:: 0.20.0
"""
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names.')
@Appender(_shared_docs['to_latex'] % _shared_doc_kwargs)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
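# Usage sketch (illustrative). ``to_latex`` returns a string when no buffer
# is passed; the output assumes a document that loads the booktabs package.
# The file name 'table.tex' is an arbitrary example.
#
#   >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
#   >>> latex = df.to_latex(index=False)        # returns a string (buf=None)
#   >>> with open('table.tex', 'w') as fh:
#   ...     df.to_latex(buf=fh, index=False)    # or write to a buffer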
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
iname = '_%s' % name
setattr(cls, iname, None)
def _indexer(self):
i = getattr(self, iname)
if i is None:
i = indexer(self, name)
setattr(self, iname, i)
return i
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
# add to our internal names set
cls._internal_names_set.add(iname)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
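# Usage sketch (illustrative): ``get`` mirrors ``dict.get``, returning the
# default instead of raising when the key is missing.
#
#   >>> df = pd.DataFrame({'a': [1, 2]})
#   >>> df.get('a')              # existing column -> Series
#   >>> df.get('missing')        # absent column  -> None
#   >>> df.get('missing', 0)     # absent column  -> supplied default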
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res.is_copy = self.is_copy
return res
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, '_cacher'):
del self._cacher
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number,
convert=True)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value, check=False)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, '_cacher', None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
cacher = cacher[1]()
return cacher
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
def _clear_item_cache(self, i=None):
if i is not None:
self._item_cache.pop(i, None)
else:
self._item_cache.clear()
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view slicable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self.is_copy = None
else:
if ref is not None:
self.is_copy = weakref.ref(ref)
else:
self.is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return a boolean if we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self.is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
"""
Parameters
----------
stacklevel : integer, default 4
the level to show of the stack when the error is output
t : string, the type of setting error
force : boolean, default False
if True, then force showing an error
validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while other are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
if force or self.is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
return
# see if the copy is not actually referred to; if so, then dissolve
# the copy weakref
try:
gc.collect(2)
if not gc.get_referents(self.is_copy()):
self.is_copy = None
return
except:
pass
# we might be a false positive
try:
if self.is_copy().shape == self.shape:
self.is_copy = None
return
except:
pass
# a custom message
if isinstance(self.is_copy, string_types):
t = self.is_copy
elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
if value == 'raise':
raise SettingWithCopyError(t)
elif value == 'warn':
warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key, )
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
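# Usage sketch (illustrative) of the MultiIndex shorthand above: deleting a
# top-level key removes every column whose leading labels match it.
#
#   >>> import numpy as np
#   >>> cols = pd.MultiIndex.from_product([['x', 'y'], ['a', 'b']])
#   >>> df = pd.DataFrame(np.ones((2, 4)), columns=cols)
#   >>> del df['x']              # drops both ('x', 'a') and ('x', 'b')
#   >>> del df[('y', 'a')]       # drops a single fully-specified column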
_shared_docs['_take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
This is the internal version of ``.take()`` and will contain a wider
selection of parameters useful for internal use but not as suitable
for public usage.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
@Appender(_shared_docs['_take'])
def _take(self, indices, axis=0, convert=True, is_copy=True):
self._consolidate_inplace()
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
new_data = self._data.take(indices,
axis=self._get_block_manager_axis(axis),
verify=True)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
_shared_docs['take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
.. deprecated:: 0.21.0
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
('parrot', 'bird', 24.0),
('lion', 'mammal', 80.5),
('monkey', 'mammal', np.nan)],
columns=('name', 'class', 'max_speed'),
index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
@Appender(_shared_docs['take'])
def take(self, indices, axis=0, convert=True, is_copy=True, **kwargs):
nv.validate_take(tuple(), kwargs)
if not convert:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._take(indices, axis=axis, convert=convert, is_copy=is_copy)
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame. Defaults to cross-section on the rows (axis=0).
Parameters
----------
key : object
Some label contained in the index, or partially in a MultiIndex
axis : int, default 0
Axis to retrieve cross-section on
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : boolean, default True
If False, returns object with same levels as self.
Examples
--------
>>> df
A B C
a 4 5 2
b 4 0 9
c 9 7 3
>>> df.xs('a')
A 4
B 5
C 2
Name: a
>>> df.xs('C', axis=1)
a 2
b 9
c 3
Name: C
>>> df
A B C D
first second third
bar one 1 4 1 8 9
two 1 7 5 5 0
baz one 1 6 6 8 0
three 2 5 3 5 3
>>> df.xs(('baz', 'three'))
A B C D
third
2 5 3 5 3
>>> df.xs('one', level=1)
A B C D
first third
bar 1 4 1 8 9
baz 1 6 6 8 0
>>> df.xs(('baz', 2), level=[0, 'third'])
A B C D
second
three 5 3 5 3
Returns
-------
xs : Series or DataFrame
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels. It is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level,
drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key,
drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self._take(inds, axis=axis, convert=False)
else:
return self._take(loc, axis=axis, convert=True)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return _maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values, index=self.columns,
name=self.index[loc], dtype=new_values.dtype)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view slicable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
DEPRECATED: use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
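# Migration sketch (illustrative): the replacement suggested by the
# deprecation warning above, selecting rows whose label satisfies ``crit``.
#
#   >>> df = pd.DataFrame({'v': [1, 2, 3]}, index=['ant', 'bee', 'cow'])
#   >>> df.select(lambda label: label.startswith('b'))             # deprecated
#   >>> df.loc[df.index.map(lambda label: label.startswith('b'))]  # preferred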
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""Return an object with matching indices to myself.
Parameters
----------
other : Object
method : string or None
copy : boolean, default True
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between labels of the other object and this
object for inexact matches.
.. versionadded:: 0.17.0
Notes
-----
Like calling s.reindex(index=other.index, columns=other.columns,
method=...)
Returns
-------
reindexed : same as input
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
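# Usage sketch (illustrative): ``reindex_like`` conforms one object to the
# labels of another, filling unmatched positions with NaN.
#
#   >>> import numpy as np
#   >>> a = pd.DataFrame(np.ones((3, 2)), index=list('abc'), columns=['x', 'y'])
#   >>> b = pd.DataFrame(np.zeros((2, 2)), index=list('ab'), columns=['x', 'z'])
#   >>> b.reindex_like(a)     # same labels as ``a``; column 'y' and row 'c' are NaN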
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : int or axis name
Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns : single label or list-like
Alternative to specifying `axis` (``labels, axis=1`` is
equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : type of caller
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Notes
-----
Specifying both `labels` and `index` or `columns` will raise a
ValueError.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Concatenate prefix string with panel items names.
Parameters
----------
prefix : string
Returns
-------
with_prefix : type of caller
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data).__finalize__(self)
def add_suffix(self, suffix):
"""
Concatenate suffix string with panel items names.
Parameters
----------
suffix : string
Returns
-------
with_suffix : type of caller
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data).__finalize__(self)
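# Usage sketch (illustrative): both helpers only relabel the info axis
# (columns for a DataFrame); the underlying data is unchanged.
#
#   >>> df = pd.DataFrame({'a': [1], 'b': [2]})
#   >>> list(df.add_prefix('col_').columns)
#   ['col_a', 'col_b']
#   >>> list(df.add_suffix('_2017').columns)
#   ['a_2017', 'b_2017']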
_shared_docs['sort_values'] = """
Sort by the values along either axis
.. versionadded:: 0.17.0
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to direct sorting
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : %(klass)s
Examples
--------
>>> df = pd.DataFrame({
... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2' : [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
NOT IMPLEMENTED: do not call this method, as sorting values is not
supported for Panel objects and will raise an error.
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
_shared_docs['sort_index'] = """
Sort object by labels (along an axis)
Parameters
----------
axis : %(axes)s to direct sorting
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
if true and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level
Returns
-------
sorted_obj : %(klass)s
"""
@Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame"))
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
_shared_docs['reindex'] = """
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
copy=False
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional (should be specified using keywords)
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to backpropagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100
2009-12-30 100
2009-12-31 100
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
Returns
-------
reindexed : %(klass)s
"""
# TODO: Decide if we care about having different examples for different
# kinds
@Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame",
optional_labels="",
optional_axis=""))
def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
tolerance = kwargs.pop('tolerance', None)
fill_value = kwargs.pop('fill_value', np.nan)
if kwargs:
raise TypeError('reindex() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all([self._get_axis(axis).identical(ax)
for axis, ax in axes.items() if ax is not None]):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except:
pass
# perform the reindex on the axes
return self._reindex_axes(axes, level, limit, tolerance, method,
fill_value, copy).__finalize__(self)
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
_shared_docs[
'reindex_axis'] = ("""Conform input object to new index with optional
filling logic, placing NA/NaN in locations having no value in the
previous index. A new object is produced unless the new index is
equivalent to the current one and copy=False
Parameters
----------
labels : array-like
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
axis : %(axes_single_arg)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
Method to use for filling holes in reindexed DataFrame:
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
>>> df.reindex_axis(['A', 'B', 'C'], axis=1)
See Also
--------
reindex, reindex_like
Returns
-------
reindexed : %(klass)s
""")
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
self._consolidate_inplace()
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
method = missing.clean_reindex_fill_method(method)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value, copy=copy)
def _reindex_with_indexers(self, reindexers, fill_value=np.nan, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = _ensure_index(index)
if indexer is not None:
indexer = _ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
List of info axis to restrict to (must not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame
Returns
-------
same type as input object
Examples
--------
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
See Also
--------
pandas.DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
"""
import re
nkw = sum([x is not None for x in [items, like, regex]])
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(
**{name: [r for r in items if r in labels]})
elif like:
def f(x):
if not isinstance(x, string_types):
x = str(x)
return like in x
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
matcher = re.compile(regex)
values = labels.map(lambda x: matcher.search(str(x)) is not None)
return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
def head(self, n=5):
"""
Return the first n rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : type of caller
The first n rows of the caller object.
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last n rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_tail : type of caller
The last n rows of the caller object.
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
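# Usage sketch (illustrative):
#
#   >>> s = pd.Series(range(10))
#   >>> s.head(3)                # rows 0, 1, 2
#   >>> s.tail(3)                # rows 7, 8, 9
#   >>> s.tail(0)                # empty result, via the n == 0 branch above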
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Returns a random sample of items from an axis of object.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
A new object of same type as caller.
Examples
--------
Generate an example ``Series`` and ``DataFrame``:
>>> s = pd.Series(np.random.randn(50))
>>> s.head()
0 -0.038497
1 1.820773
2 -0.972766
3 -1.598270
4 -1.095526
dtype: float64
>>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD'))
>>> df.head()
A B C D
0 0.016443 -2.318952 -0.566372 -1.028078
1 -1.051921 0.438836 0.658280 -0.175797
2 -1.243569 -0.364626 -0.215065 0.057736
3 1.768216 0.404512 -0.385604 -1.457834
4 1.072446 -1.137172 0.314194 -0.046661
Next extract a random sample from both of these objects...
3 random elements from the ``Series``:
>>> s.sample(n=3)
27 -0.994689
55 -1.049016
67 -0.224565
dtype: float64
And a random 10% of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.1, replace=True)
A B C D
35 1.981780 0.142106 1.817165 -0.290805
49 -1.336199 -0.448634 -0.789640 0.217116
40 0.823173 -0.078816 1.009536 1.015108
15 1.421154 -0.055301 -1.922594 -0.019696
6 -0.148339 0.832938 1.787600 -1.383767
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com._random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, string_types):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
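# Usage sketch (illustrative): sampling rows of a DataFrame with a weights
# column, as described in the docstring above; weights are renormalized to
# sum to 1 before drawing.
#
#   >>> df = pd.DataFrame({'value': [1, 2, 3], 'w': [0.1, 0.1, 0.8]})
#   >>> df.sample(n=2, weights='w', random_state=1)   # 'w' names a column
#   >>> df.sample(frac=0.5, random_state=1)           # half the rows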
_shared_docs['pipe'] = (r"""
Apply func(self, \*args, \*\*kwargs)
Parameters
----------
func : function
function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series or DataFrames. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.apply
pandas.DataFrame.applymap
pandas.Series.map
""")
@Appender(_shared_docs['pipe'] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
_shared_docs['aggregate'] = ("""
Aggregate using callable, string, dict, or list of string/callables
%(versionadded)s
Parameters
----------
func : callable, string, dictionary, or list of string/callables
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply. For
a DataFrame, can pass a dict, if the keys are DataFrame column names.
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
`agg` is an alias for `aggregate`. Use the alias.
Returns
-------
aggregated : %(klass)s
""")
_shared_docs['transform'] = ("""
Call function producing a like-indexed %(klass)s
and return a %(klass)s with the transformed values
.. versionadded:: 0.20.0
Parameters
----------
func : callable, string, dictionary, or list of string/callables
To apply to column
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Returns
-------
transformed : %(klass)s
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
>>> df.transform(lambda x: (x - x.mean()) / x.std())
A B C
2000-01-01 0.579457 1.236184 0.123424
2000-01-02 0.370357 -0.605875 -1.231325
2000-01-03 1.455756 -0.277446 0.288967
2000-01-04 NaN NaN NaN
2000-01-05 NaN NaN NaN
2000-01-06 NaN NaN NaN
2000-01-07 NaN NaN NaN
2000-01-08 -0.498658 1.274522 1.642524
2000-01-09 -0.540524 -1.012676 -0.828968
2000-01-10 -1.366388 -0.614710 0.005378
See also
--------
pandas.%(klass)s.aggregate
pandas.%(klass)s.apply
""")
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (name in self._internal_names_set or name in self._metadata or
name in self._accessors):
return object.__getattribute__(self, name)
else:
if name in self._info_axis:
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn("Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
object.__setattr__(self, name, value)
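# Caveat sketch (illustrative) for the warning above: assigning to a new
# attribute name does NOT create a column; item assignment does.
#
#   >>> df = pd.DataFrame({'a': [1, 2]})
#   >>> df.b = [3, 4]        # warns; stores a plain attribute, not a column
#   >>> 'b' in df.columns
#   False
#   >>> df['b'] = [3, 4]     # creates the column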
# ----------------------------------------------------------------------
# Getting and setting elements
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
def consolidate(self, inplace=False):
"""
DEPRECATED: consolidate will be an internal implementation only.
"""
# 15483
warnings.warn("consolidate is deprecated and will be removed in a "
"future release.", FutureWarning, stacklevel=2)
return self._consolidate(inplace)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except:
pass
raise TypeError('Cannot do inplace boolean setting on '
'mixed-types with a non np.nan value')
return True
def _get_numeric_data(self):
return self._constructor(
self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
Parameters
----------
        columns : list, optional, default: None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. If dtypes are int32 and uint8, dtype will be upcast to
        int32. By numpy.find_common_type convention, mixing int64 and uint64
        will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
See Also
--------
pandas.DataFrame.values
"""
self._consolidate_inplace()
if self._AXIS_REVERSED:
return self._data.as_matrix(columns).T
return self._data.as_matrix(columns)
@property
def values(self):
"""Numpy representation of NDFrame
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
        will result in a float64 dtype.
"""
return self.as_matrix()
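    # Illustrative sketch (hypothetical frame ``df``): ``.values`` upcasts to a
    # lowest-common dtype, so mixing int and float columns yields a float64
    # ndarray, and mixing in strings yields dtype=object.
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame({'i': [1, 2], 'f': [0.5, 1.5]})
    # >>> df.values.dtype
    # dtype('float64')
    # >>> df['s'] = ['a', 'b']
    # >>> df.values.dtype
    # dtype('O')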
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.as_matrix()
def get_values(self):
"""same as values (but handles sparseness conversions)"""
return self.as_matrix()
def get_dtype_counts(self):
"""Return the counts of dtypes in this object."""
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""Return the counts of ftypes in this object."""
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""Return the dtypes in this object."""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype)
in this object.
"""
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis,
dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> Constructor Types
"""
warnings.warn("as_blocks is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks()
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()}
@deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors',
mapping={True: 'raise', False: 'ignore'})
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True.
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'.
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
raise_on_error : raise on invalid input
.. deprecated:: 0.20.0
Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : type of caller
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> ser.astype('category', ordered=True, categories=[2, 1])
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
See also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError('Only the Series name can be used for '
'the key in Series dtype mappings.')
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
'astype() only accepts a dtype arg of type dict when '
'invoked on Series and DataFrames. A single dtype must be '
'specified when invoked on a Panel.'
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
from pandas import concat
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
                results.append(col.copy() if copy else col)
return concat(results, axis=1, copy=False)
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
**kwargs)
return self._constructor(new_data).__finalize__(self)
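    # Illustrative sketch (hypothetical frame ``df``): casting columns
    # individually by passing a dict of column name -> dtype to ``astype``.
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': ['1.5', '2.5']})
    # >>> df.astype({'a': 'float64', 'b': 'float64'}).dtypes
    # a    float64
    # b    float64
    # dtype: object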
def copy(self, deep=True):
"""
Make a copy of this objects data.
Parameters
----------
deep : boolean or string, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices or the data are copied.
Note that when ``deep=True`` data is copied, actual python objects
will not be copied recursively, only the reference to the object.
This is in contrast to ``copy.deepcopy`` in the Standard Library,
which recursively copies object data.
Returns
-------
copy : type of caller
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
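    # Illustrative sketch (hypothetical Series ``s``): with ``deep=False`` the
    # copy shares its data with the original, so in-place edits show up in
    # both, whereas a deep copy is independent.
    #
    # >>> import pandas as pd
    # >>> s = pd.Series([1, 2, 3])
    # >>> shallow = s.copy(deep=False)
    # >>> shallow.iloc[0] = 99
    # >>> s.iloc[0]
    # 99
    # >>> deep = s.copy(deep=True)
    # >>> deep.iloc[0] = 0
    # >>> s.iloc[0]              # unaffected by edits to the deep copy
    # 99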
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _convert(self, datetime=False, numeric=False, timedelta=False,
coerce=False, copy=True):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
return self._constructor(
self._data.convert(datetime=datetime, numeric=numeric,
timedelta=timedelta, coerce=coerce,
copy=copy)).__finalize__(self)
# TODO: Remove in 0.18 or 2017, which ever is sooner
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Deprecated.
Attempt to infer better dtype for object columns
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
        pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same as input object
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
        pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same type as input object
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(datetime=True, numeric=False,
timedelta=True, coerce=False,
copy=True)).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
_shared_docs['fillna'] = ("""
Fill NA/NaN values using the specified method
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). (values not
in the dict/Series/DataFrame will not be filled). This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
axis : %(axes_single_arg)s
inplace : boolean, default False
If True, fill in place. Note: this will modify any
other views on this object, (e.g. a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
See Also
--------
reindex, asfreq
Returns
-------
filled : %(klass)s
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
""")
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
method = missing.clean_fill_method(method)
from pandas import DataFrame
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
# > 3d
if self.ndim > 3:
raise NotImplementedError('Cannot fillna with a method for > '
'3dims')
# 3d
elif self.ndim == 3:
# fill in 2d chunks
result = dict([(col, s.fillna(method=method, value=value))
for col, s in self.iteritems()])
new_obj = self._constructor.\
from_dict(result).__finalize__(self)
new_data = new_obj._data
else:
# 2d or less
method = missing.clean_fill_method(method)
new_data = self._data.interpolate(method=method, axis=axis,
limit=limit, inplace=inplace,
coerce=True,
downcast=downcast)
else:
if method is not None:
raise ValueError('cannot specify both a fill method and value')
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise ValueError("invalid fill value with a %s" %
type(value))
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
'by column')
result = self if inplace else self.copy()
for k, v in compat.iteritems(value):
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='ffill') <DataFrame.fillna>`
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='bfill') <DataFrame.fillna>`
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
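    # Illustrative sketch (hypothetical Series ``s``): ``ffill`` propagates the
    # last valid value forward, while ``bfill`` pulls the next valid value
    # backward.
    #
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> s = pd.Series([1.0, np.nan, np.nan, 4.0])
    # >>> s.ffill().tolist()
    # [1.0, 1.0, 1.0, 4.0]
    # >>> s.bfill().tolist()
    # [1.0, 4.0, 4.0, 4.0]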
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
"""
Replace values given in 'to_replace' with 'value'.
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
* str or regex:
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str and regex rules apply as above.
* dict:
- Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
follows: look in column 'a' for the value 'b' and replace it
with nan. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
- Keys map to column names and values map to substitution
values. You can treat this as a special case of passing two
lists except that you are specifying the column to search in.
* None:
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If `value` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
dict will not be filled). Regular expressions, strings and lists or
dicts of such objects are also allowed.
inplace : boolean, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Otherwise, `to_replace` must be ``None`` because this
parameter will be interpreted as a regular expression or a list,
dict, or array of regular expressions.
method : string, optional, {'pad', 'ffill', 'bfill'}
            The method to use for replacement, when ``to_replace`` is a
``list``.
See Also
--------
NDFrame.reindex
NDFrame.asfreq
NDFrame.fillna
Returns
-------
filled : NDFrame
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not ``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable into a
regular expression or is a list, dict, ndarray, or Series.
ValueError
* If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but
they are not the same length.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point numbers
*are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if axis is not None:
warnings.warn('the "axis" argument is deprecated '
                          'and will be removed in '
'v0.13; this argument has no effect')
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
return _single_replace(self, to_replace, method, inplace,
limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError('If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
'regex must be a mapping')
to_replace = regex
regex = True
items = list(compat.iteritems(to_replace))
keys, values = lzip(*items) or ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError("If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings")
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = lzip(*v.items()) or ([], [])
if set(keys) & set(values):
raise ValueError("Replacement not allowed with "
"overlapping keys and values")
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
limit=limit, regex=regex)
else:
# need a non-zero len on all axes
for a in self._AXIS_ORDERS:
if not len(self._get_axis(a)):
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
# object conversion is handled in
                        # series.replace which is called recursively
res[c] = res[c].replace(to_replace=src,
value=value[c],
inplace=False,
regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in compat.iteritems(to_replace)
if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError('Replacement lists must match '
'in length. Expecting %d got %d ' %
(len(to_replace), len(value)))
new_data = self._data.replace_list(src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
elif to_replace is None:
if not (is_re_compilable(regex) or
is_list_like(regex) or is_dict_like(regex)):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__))
return self.replace(regex, value, inplace=inplace, limit=limit,
regex=True)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in compat.iteritems(value):
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v, filter=[k],
inplace=inplace,
regex=regex)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
else:
msg = ('Invalid "to_replace" type: '
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
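    # Illustrative sketch (hypothetical frame ``df``): the nested-dict form of
    # ``replace`` restricts the replacement to a single column.
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame({'a': [0, 1, 2], 'b': [0, 3, 4]})
    # >>> df.replace({'a': {0: 10}})['a'].tolist()
    # [10, 1, 2]
    # >>> df.replace({'a': {0: 10}})['b'].tolist()   # column 'b' untouched
    # [0, 3, 4]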
_shared_docs['interpolate'] = """
Please note that only ``method='linear'`` is supported for
DataFrames/Series with a MultiIndex.
Parameters
----------
method : {'linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial',
'from_derivatives', 'pchip', 'akima'}
* 'linear': ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
default
* 'time': interpolation works on daily and higher resolution
data to interpolate given length of interval
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
``scipy.interpolate.interp1d``. Both 'polynomial' and 'spline'
require that you also specify an `order` (int),
e.g. df.interpolate(method='polynomial', order=4).
These use the actual numerical values of the index.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
are all wrappers around the scipy interpolation methods of
similar names. These use the actual numerical values of the
index. For more information on their behavior, see the
`scipy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `tutorial documentation
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__
* 'from_derivatives' refers to BPoly.from_derivatives which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18
.. versionadded:: 0.18.1
Added support for the 'akima' method
Added interpolate method 'from_derivatives' which replaces
'piecewise_polynomial' in scipy 0.18; backwards-compatible with
scipy < 0.18
axis : {0, 1}, default 0
* 0: fill column-by-column
* 1: fill row-by-row
limit : int, default None.
Maximum number of consecutive NaNs to fill. Must be greater than 0.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
.. versionadded:: 0.17.0
inplace : bool, default False
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
kwargs : keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame of same shape interpolated at the NaNs
See Also
--------
reindex, replace, fillna
Examples
--------
Filling in NaNs
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s.interpolate()
0 0
1 1
2 2
3 3
dtype: float64
"""
@Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
"on Panel and Panel 4D objects.")
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
else:
_maybe_transposed_self = self
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if (isinstance(_maybe_transposed_self.index, MultiIndex) and
method != 'linear'):
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
if _maybe_transposed_self._data.get_dtype_counts().get(
'object') == len(_maybe_transposed_self.T):
raise TypeError("Cannot interpolate with all NaNs.")
# create/use the index
if method == 'linear':
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
if isna(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
data = _maybe_transposed_self._data
new_data = data.interpolate(method=method, axis=ax, index=index,
values=_maybe_transposed_self, limit=limit,
limit_direction=limit_direction,
inplace=inplace, downcast=downcast,
**kwargs)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
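    # Illustrative sketch (hypothetical Series ``s``): linear interpolation
    # fills interior NaNs, and ``limit`` caps how many consecutive NaNs are
    # filled.
    #
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> s = pd.Series([0.0, np.nan, np.nan, np.nan, 4.0])
    # >>> s.interpolate().tolist()
    # [0.0, 1.0, 2.0, 3.0, 4.0]
    # >>> s.interpolate(limit=1).tolist()
    # [0.0, 1.0, nan, nan, 4.0]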
# ----------------------------------------------------------------------
    # Timeseries Methods
def asof(self, where, subset=None):
"""
The last row without any NaN is taken (or the last row without
NaN considering only the subset of columns in the case of a DataFrame)
.. versionadded:: 0.19.0 For DataFrame
        If there is no good value, NaN is returned for a Series or
        a Series of NaN values for a DataFrame
Parameters
----------
where : date or array of dates
subset : string or list of strings, default None
if not None use these columns for NaN propagation
Notes
-----
Dates are assumed to be sorted
Raises if this is not the case
Returns
-------
where is scalar
- value or NaN if input is Series
- Series if input is DataFrame
where is Index: same shape object as input
See Also
--------
merge_asof
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError("asof is not implemented "
"for {type}".format(type=type(self)))
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side='right')
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
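    # Illustrative sketch (hypothetical Series ``s`` with a sorted index):
    # ``asof`` returns the last non-NaN value at or before ``where``.
    #
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> s = pd.Series([1.0, 2.0, np.nan, 4.0], index=[10, 20, 30, 40])
    # >>> s.asof(25)
    # 2.0
    # >>> s.asof(35)     # the row at 30 holds NaN, so the value at 20 is used
    # 2.0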
# ----------------------------------------------------------------------
# Action Methods
_shared_docs['isna'] = """
Return a boolean same-sized object indicating if the values are NA.
See Also
--------
%(klass)s.notna : boolean inverse of isna
%(klass)s.isnull : alias of isna
isna : top-level isna
"""
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
_shared_docs['notna'] = """
Return a boolean same-sized object indicating if the values are
not NA.
See Also
--------
%(klass)s.isna : boolean inverse of notna
%(klass)s.notnull : alias of notna
notna : top-level notna
"""
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace=False):
if ((lower is not None and np.any(isna(lower))) or
(upper is not None and np.any(isna(upper)))):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self.values
mask = isna(result)
with np.errstate(all='ignore'):
if upper is not None:
result = np.where(result >= upper, upper, result)
if lower is not None:
result = np.where(result <= lower, lower, result)
if np.any(mask):
result[mask] = np.nan
axes_dict = self._construct_axes_dict()
result = self._constructor(result, **axes_dict).__finalize__(self)
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
inplace = validate_bool_kwarg(inplace, 'inplace')
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == 'le':
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = pd.Series(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, np.asarray(threshold),
axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False,
*args, **kwargs):
"""
Trim values at input threshold(s).
Parameters
----------
lower : float or array_like, default None
upper : float or array_like, default None
axis : int or string axis name, optional
Align object with lower and upper along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
Returns
-------
clipped : Series
Examples
--------
>>> df
0 1
0 0.335232 -1.256177
1 -1.367855 0.746646
2 0.027753 -1.176076
3 0.230930 -0.679613
4 1.261967 0.570967
>>> df.clip(-1.0, 0.5)
0 1
0 0.335232 -1.000000
1 -1.000000 0.500000
2 0.027753 -1.000000
3 0.230930 -0.679613
4 0.500000 0.500000
>>> t
0 -0.3
1 -0.2
2 -0.1
3 0.0
4 0.1
dtype: float64
>>> df.clip(t, t + 1, axis=0)
0 1
0 0.335232 -0.300000
1 -0.200000 0.746646
2 0.027753 -0.100000
3 0.230930 0.000000
4 1.100000 0.570967
"""
if isinstance(self, ABCPanel):
raise NotImplementedError("clip is not supported yet for panels")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = nv.validate_clip_with_axis(axis, args, kwargs)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
if np.any(pd.isnull(lower)):
lower = None
if np.any(pd.isnull(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if ((lower is None or (is_scalar(lower) and is_number(lower))) and
(upper is None or (is_scalar(upper) and is_number(upper)))):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result.clip_lower(lower, axis, inplace=inplace)
if upper is not None:
if inplace:
result = self
result = result.clip_upper(upper, axis, inplace=inplace)
return result
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Return copy of input with values above given value(s) truncated.
Parameters
----------
threshold : float or array_like
axis : int or string axis name, optional
Align object with threshold along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
See Also
--------
clip
Returns
-------
clipped : same type as input
"""
return self._clip_with_one_bound(threshold, method=self.le,
axis=axis, inplace=inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Return copy of the input with values below given value(s) truncated.
Parameters
----------
threshold : float or array_like
axis : int or string axis name, optional
Align object with threshold along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
See Also
--------
clip
Returns
-------
clipped : same type as input
"""
return self._clip_with_one_bound(threshold, method=self.ge,
axis=axis, inplace=inplace)
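    # Illustrative sketch (hypothetical Series ``s``): scalar bounds trim
    # values outside [lower, upper]; NaNs are left untouched.
    #
    # >>> import numpy as np
    # >>> import pandas as pd
    # >>> s = pd.Series([-2.0, 0.5, np.nan, 3.0])
    # >>> s.clip(lower=-1.0, upper=1.0).tolist()
    # [-1.0, 0.5, nan, 1.0]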
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns.
Parameters
----------
by : mapping, function, str, or iterable
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
            values are used as-is to determine the groups. A str or list of strs
may be passed to group by the columns in ``self``
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. groupby preserves the order of rows within each group.
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
squeeze : boolean, default False
reduce the dimensionality of the return type if possible,
otherwise return a consistent type
Examples
--------
DataFrame results
>>> data.groupby(func, axis=0).mean()
>>> data.groupby(['col1', 'col2'])['col3'].mean()
DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Returns
-------
GroupBy object
"""
from pandas.core.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze,
**kwargs)
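    # Illustrative sketch (hypothetical frame ``df``): grouping on one column
    # and aggregating another.
    #
    # >>> import pandas as pd
    # >>> df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 10]})
    # >>> df.groupby('key')['val'].sum()
    # key
    # a     3
    # b    10
    # Name: val, dtype: int64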
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
        fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : type of caller
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
from pandas.core.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize,
fill_value=fill_value)
def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : type of caller
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : type of caller
"""
try:
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
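    # Illustrative sketch (hypothetical hourly Series ``ts``): selecting rows
    # by time of day with ``at_time`` and ``between_time``.
    #
    # >>> import pandas as pd
    # >>> idx = pd.date_range('2000-01-01', periods=5, freq='H')
    # >>> ts = pd.Series(range(5), index=idx)
    # >>> ts.at_time('02:00').tolist()
    # [2]
    # >>> ts.between_time('01:00', '03:00').tolist()
    # [1, 2, 3]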
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
"""
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (DatetimeIndex,
PeriodIndex, or TimedeltaIndex), or pass datetime-like values
to the on or level keyword.
Parameters
----------
rule : string
the offset string or object representing target conversion
axis : int, optional, default 0
closed : {'right', 'left'}
Which side of bin interval is closed
label : {'right', 'left'}
Which bin edge label to label bucket with
convention : {'start', 'end', 's', 'e'}
For PeriodIndex only, controls whether to use the start or end of
`rule`
loffset : timedelta
Adjust the resampled time labels
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
on : string, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
.. versionadded:: 0.19.0
level : string or int, optional
For a MultiIndex, level (name or number) to use for
resampling. Level must be datetime-like.
.. versionadded:: 0.19.0
Notes
-----
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
        value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] #select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like)+5
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
freq='A',
periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
Resample by month using 'start' `convention`. Values are assigned to
the first month of the period.
>>> s.resample('M', convention='start').asfreq().head()
2012-01 1.0
2012-02 NaN
2012-03 NaN
2012-04 NaN
2012-05 NaN
Freq: M, dtype: float64
Resample by month using 'end' `convention`. Values are assigned to
the last month of the period.
>>> s.resample('M', convention='end').asfreq()
2012-12 1.0
2013-01 NaN
2013-02 NaN
2013-03 NaN
2013-04 NaN
2013-05 NaN
2013-06 NaN
2013-07 NaN
2013-08 NaN
2013-09 NaN
2013-10 NaN
2013-11 NaN
2013-12 2.0
Freq: M, dtype: float64
For DataFrame objects, the keyword ``on`` can be used to specify the
column instead of the index for resampling.
>>> df = pd.DataFrame(data=9*[range(4)], columns=['a', 'b', 'c', 'd'])
>>> df['time'] = pd.date_range('1/1/2000', periods=9, freq='T')
>>> df.resample('3T', on='time').sum()
a b c d
time
2000-01-01 00:00:00 0 3 6 9
2000-01-01 00:03:00 0 3 6 9
2000-01-01 00:06:00 0 3 6 9
For a DataFrame with MultiIndex, the keyword ``level`` can be used to
        specify on which level the resampling needs to take place.
>>> time = pd.date_range('1/1/2000', periods=5, freq='T')
>>> df2 = pd.DataFrame(data=10*[range(4)],
columns=['a', 'b', 'c', 'd'],
index=pd.MultiIndex.from_product([time, [1, 2]])
)
>>> df2.resample('3T', level=0).sum()
a b c d
2000-01-01 00:00:00 0 6 12 18
2000-01-01 00:03:00 0 4 8 12
"""
from pandas.core.resample import (resample,
_maybe_process_deprecations)
axis = self._get_axis_number(axis)
r = resample(self, freq=rule, label=label, closed=closed,
axis=axis, kind=kind, loffset=loffset,
convention=convention,
base=base, key=on, level=level)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.first('10D') -> First 10 days
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'first' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.last('5M') -> Last 5 months
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'last' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.iloc[start:]
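    # Illustrative sketch (hypothetical daily Series ``ts``): ``first`` and
    # ``last`` subset by an offset measured from the start and the end of the
    # index, respectively.
    #
    # >>> import pandas as pd
    # >>> idx = pd.date_range('2000-01-01', periods=10, freq='D')
    # >>> ts = pd.Series(range(10), index=idx)
    # >>> ts.first('3D').tolist()
    # [0, 1, 2]
    # >>> ts.last('3D').tolist()
    # [7, 8, 9]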
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
index to direct ranking
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : boolean, default None
Include only float, int, boolean data. Valid only for DataFrame or
Panel objects
na_option : {'keep', 'top', 'bottom'}
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : same type as caller
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
msg = "rank does not make sense when ndim > 2"
raise NotImplementedError(msg)
def ranker(data):
ranks = algos.rank(data.values, axis=axis, method=method,
ascending=ascending, na_option=na_option,
pct=pct)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
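    # Illustrative sketch (hypothetical Series ``s``): tied values get the
    # average of their ranks by default; ``method='min'`` gives them the
    # lowest rank instead.
    #
    # >>> import pandas as pd
    # >>> s = pd.Series([7, 3, 3, 10])
    # >>> s.rank().tolist()
    # [3.0, 1.5, 1.5, 4.0]
    # >>> s.rank(method='min').tolist()
    # [3.0, 1.0, 1.0, 4.0]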
_shared_docs['align'] = ("""
Align two objects on their axes with the
specified join method for each axis Index
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : boolean, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : str, default None
limit : int, default None
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
.. versionadded:: 0.17.0
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects
""")
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(dict((c, self) for c in other.columns),
**other._construct_axes_dict())
return df._align_frame(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis)
elif isinstance(other, Series):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(dict((c, other) for c in self.columns),
**self._construct_axes_dict())
return self._align_frame(df, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
elif isinstance(other, Series):
return self._align_series(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
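    # Illustrative sketch (hypothetical Series ``a`` and ``b``): an outer align
    # returns both objects reindexed to the union of their indexes, with NaN
    # where either side has no value.
    #
    # >>> import pandas as pd
    # >>> a = pd.Series([1, 2], index=['x', 'y'])
    # >>> b = pd.Series([3, 4], index=['y', 'z'])
    # >>> left, right = a.align(b, join='outer')
    # >>> list(left.index)
    # ['x', 'y', 'z']
    # >>> left.tolist(), right.tolist()
    # ([1.0, 2.0, nan], [nan, 3.0, 4.0])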
def _align_frame(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=np.nan, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(reindexers, copy=copy,
fill_value=fill_value,
allow_dups=True)
# other must be always DataFrame
right = other._reindex_with_indexers({0: [join_index, iridx],
1: [join_columns, cridx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(other.index, how=join,
level=level,
return_indexers=True)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError('Must specify axis=0 or 1')
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit,
axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# align the cond to same shape as myself
cond = com._apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join='right', broadcast_axis=1)
else:
if not hasattr(cond, 'shape'):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError('Array conditional must be same shape as '
'self')
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = True if inplace else False
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, pd.DataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
else:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = cond.astype(bool, copy=False)
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, 'align'):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(other, join='left', axis=axis,
level=level, fill_value=np.nan)
# if we are NOT aligned, raise as we cannot where index
if (axis is None and
not all([other._get_axis(i).equals(ax)
for i, ax in enumerate(self.axes)])):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError("cannot align with a higher "
"dimensional NDFrame")
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = _values_from_object(self).copy()
new_other[icond] = other
other = new_other
except:
try_quick = False
# let's create a new (if we failed at the above
# or not try_quick
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError('Length of replacements must equal '
'series length')
else:
raise ValueError('other must be the same shape as self '
'when an ndarray')
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, 'ndim', 0):
align = True
else:
align = (self._get_axis_number(axis) == 1)
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(mask=cond, new=other, align=align,
inplace=True, axis=block_axis,
transpose=self._AXIS_REVERSED)
self._update_inplace(new_data)
else:
new_data = self._data.where(other=other, cond=cond, align=align,
errors=errors,
try_cast=try_cast, axis=block_axis,
transpose=self._AXIS_REVERSED)
return self._constructor(new_data).__finalize__(self)
_shared_docs['where'] = ("""
Return an object of same shape as self and whose corresponding
entries are from self where `cond` is %(cond)s and otherwise are from
`other`.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as cond.
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as other.
inplace : boolean, default False
Whether to perform the operation in place on the data
axis : alignment axis if needed, default None
level : alignment level if needed, default None
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
try_cast : boolean, default False
try to cast the result back to the input type (if possible),
raise_on_error : boolean, default True
Whether to raise on invalid data types (e.g. trying to where on
strings)
.. deprecated:: 0.21.0
Returns
-------
wh : same type as caller
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
>>> s.where(s > 1, 10)
0 10.0
1 10.0
2 2.0
3 3.0
4 4.0
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
See Also
--------
:func:`DataFrame.%(name_other)s`
""")
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True",
cond_rev="False", name='where',
name_other='mask'))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
other = com._apply_if_callable(other, self)
return self._where(cond, other, inplace, axis, level,
errors=errors, try_cast=try_cast)
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False",
cond_rev="True", name='mask',
name_other='where'))
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com._apply_if_callable(cond, self)
return self.where(~cond, other=other, inplace=inplace, axis=axis,
level=level, try_cast=try_cast,
errors=errors)
_shared_docs['shift'] = ("""
Shift index by desired number of periods with an optional time freq
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
Increment to use from the tseries module or time rule (e.g. 'EOM').
See Notes.
axis : %(axes_single_arg)s
Notes
-----
If freq is specified then the index values are shifted but the data
is not realigned. That is, use freq if you would like to extend the
index when shifting and preserve the original data.
Returns
-------
shifted : %(klass)s
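Examples
--------
Illustrative only; the values are made up.
>>> s = pd.Series([10, 20, 30])
>>> s.shift(1)
0     NaN
1    10.0
2    20.0
dtype: float64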
""")
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
if periods == 0:
return self
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(periods=periods, axis=block_axis)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
Returns
-------
shifted : same type as caller
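Examples
--------
Illustrative only; note that the shifted-off periods are dropped.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.slice_shift(2)
2    1
3    2
dtype: int64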
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
Returns
-------
shifted : NDFrame
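Examples
--------
A small sketch with a made-up daily index; the values stay in place
while the index is moved forward by one period.
>>> idx = pd.date_range('2000-01-01', periods=3, freq='D')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> str(s.tshift(1).index[0])
'2000-01-02 00:00:00'
>>> s.tshift(1).tolist()
[1, 2, 3]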
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, string_types):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(self, before=None, after=None, axis=None, copy=True):
"""Truncates a sorted NDFrame before and/or after some particular
index value. If the axis contains only datetime values, before/after
parameters are converted to datetime values.
Parameters
----------
before : date
Truncate before index value
after : date
Truncate after index value
axis : the truncation axis, defaults to the stat axis
copy : boolean, default is True,
return a copy of the truncated section
Returns
-------
truncated : type of caller
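Examples
--------
Illustrative only; the frame below is made up.
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4]}, index=[1, 2, 3, 4, 5])
>>> df.truncate(before=2, after=4)
   A
2  1
3  2
4  3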
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
Raises
------
TypeError
If the axis is tz-naive.
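Examples
--------
A minimal sketch with a made-up UTC index.
>>> idx = pd.date_range('2018-03-01 09:00', periods=2, freq='H', tz='UTC')
>>> s = pd.Series([1, 2], index=idx)
>>> str(s.tz_convert('US/Eastern').index.tz)
'US/Eastern'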
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, 'tz_convert'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer',
False: 'raise'})
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
infer_dst : boolean, default False
.. deprecated:: 0.15.0
Attempt to infer fall dst-transition hours based on order
Returns
-------
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
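Examples
--------
A minimal sketch with a made-up tz-naive index.
>>> idx = pd.date_range('2018-03-01', periods=2, freq='D')
>>> s = pd.Series([1, 2], index=idx)
>>> str(s.tz_localize('UTC').index.tz)
'UTC'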
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return an object with absolute value taken--only applicable to objects
that are all numeric.
Returns
-------
abs: type of caller
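Examples
--------
Illustrative only.
>>> pd.Series([-1, 2, -3]).abs()
0    1
1    2
2    3
dtype: int64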
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
summary: Series/DataFrame of summary statistics
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({ 'object': ['a', 'b', 'c'],
... 'numeric': [1, 2, 3],
... 'categorical': pd.Categorical(['d','e','f'])
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
See Also
--------
DataFrame.count
DataFrame.max
DataFrame.min
DataFrame.mean
DataFrame.std
DataFrame.select_dtypes
"""
if self.ndim >= 3:
msg = "describe is not implemented on Panel or PanelND objects."
raise NotImplementedError(msg)
elif self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
[series.quantile(x) for x in percentiles] + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_dtype(data):
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
result += [lib.Timestamp(top), freq,
lib.Timestamp(asint.min()),
lib.Timestamp(asint.max())]
else:
names += ['top', 'freq']
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""Validate percentiles (used by describe and quantile)."""
msg = ("percentiles should all be in the interval [0, 1]. "
"Try {0} instead.")
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs['pct_change'] = """
Percent change over given number of periods.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change
fill_method : str, default 'pad'
How to handle NAs before computing percent changes
limit : int, default None
The number of consecutive NAs to fill before stopping
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay())
Returns
-------
chg : %(klass)s
Notes
-----
By default, the percentage change is calculated along the stat
axis: 0, or ``Index``, for ``DataFrame`` and 1, or ``minor`` for
``Panel``. You can change this with the ``axis`` keyword argument.
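Examples
--------
Illustrative only; the values are made up.
>>> s = pd.Series([10.0, 20.0, 30.0])
>>> s.pct_change()
0    NaN
1    1.0
2    0.5
dtype: float64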
"""
@Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
if freq is None:
mask = isna(_values_from_object(self))
np.putmask(rs.values, mask, np.nan)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""Add the operations to the cls; evaluate the doc strings again"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls, 'any', name, name2, axis_descr,
'Return whether any element is True over requested axis',
nanops.nanany)
cls.all = _make_logical_function(
cls, 'all', name, name2, axis_descr,
'Return whether all elements are True over requested axis',
nanops.nanall)
@Substitution(outname='mad',
desc="Return the mean absolute deviation of the values "
"for the requested axis",
name1=name, name2=name2, axis_descr=axis_descr)
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level('mad', axis=axis, level=level,
skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls, 'sem', name, name2, axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem)
cls.var = _make_stat_function_ddof(
cls, 'var', name, name2, axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar)
cls.std = _make_stat_function_ddof(
cls, 'std', name, name2, axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd)
@Substitution(outname='compounded',
desc="Return the compound percentage of the values for "
"the requested axis", name1=name, name2=name2,
axis_descr=axis_descr)
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
cls.cummin = _make_cum_function(
cls, 'cummin', name, name2, axis_descr, "cumulative minimum",
lambda y, axis: np.minimum.accumulate(y, axis), "min",
np.inf, np.nan)
cls.cumsum = _make_cum_function(
cls, 'cumsum', name, name2, axis_descr, "cumulative sum",
lambda y, axis: y.cumsum(axis), "sum", 0., np.nan)
cls.cumprod = _make_cum_function(
cls, 'cumprod', name, name2, axis_descr, "cumulative product",
lambda y, axis: y.cumprod(axis), "prod", 1., np.nan)
cls.cummax = _make_cum_function(
cls, 'cummax', name, name2, axis_descr, "cumulative max",
lambda y, axis: np.maximum.accumulate(y, axis), "max",
-np.inf, np.nan)
cls.sum = _make_stat_function(
cls, 'sum', name, name2, axis_descr,
'Return the sum of the values for the requested axis',
nanops.nansum)
cls.mean = _make_stat_function(
cls, 'mean', name, name2, axis_descr,
'Return the mean of the values for the requested axis',
nanops.nanmean)
cls.skew = _make_stat_function(
cls, 'skew', name, name2, axis_descr,
'Return unbiased skew over requested axis\nNormalized by N-1',
nanops.nanskew)
cls.kurt = _make_stat_function(
cls, 'kurt', name, name2, axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1\n",
nanops.nankurt)
cls.kurtosis = cls.kurt
cls.prod = _make_stat_function(
cls, 'prod', name, name2, axis_descr,
'Return the product of the values for the requested axis',
nanops.nanprod)
cls.product = cls.prod
cls.median = _make_stat_function(
cls, 'median', name, name2, axis_descr,
'Return the median of the values for the requested axis',
nanops.nanmedian)
cls.max = _make_stat_function(
cls, 'max', name, name2, axis_descr,
"""This method returns the maximum of the values in the object.
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
"""This method returns the minimum of the values in the object.
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin)
@classmethod
def _add_series_only_operations(cls):
"""Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
return nmax - nmin
cls.ptp = _make_stat_function(
cls, 'ptp', name, name2, axis_descr,
"""Returns the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.""",
nanptp)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods, freq=freq,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
axis = self._get_axis_number(axis)
return rwindow.expanding(self, min_periods=min_periods, freq=freq,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
alpha=alpha, min_periods=min_periods, freq=freq,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce "
"aggregated results")
return result
cls.transform = transform
# ----------------------------------------------------------------------
# Misc methods
_shared_docs['valid_index'] = """
Return index for %(position)s non-NA/null value.
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
Returns
-------
scalar : type of index
"""
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join(["{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS)])
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA or empty, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
degrees of freedom
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
bool_only : boolean, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_cnum_doc = """
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
%(outname)s : %(name1)s\n
See also
--------
pandas.core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
"""
def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, ddof=ddof)
return self._reduce(f, name, axis=axis, numeric_only=numeric_only,
skipna=skipna, ddof=ddof)
return set_function_name(stat_func, name, cls)
def _make_cum_function(cls, name, name1, name2, axis_descr, desc,
accum_func, accum_func_name, mask_a, mask_b):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, accum_func_name=accum_func_name)
@Appender("Return {0} over requested axis.".format(desc) +
_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = _values_from_object(self).copy()
if (skipna and
issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, tslib.iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d['copy'] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_bool_doc)
def logical_func(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
if bool_only is not None:
raise NotImplementedError("Option bool_only is not "
"implemented with option level.")
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, axis=axis, skipna=skipna,
numeric_only=bool_only, filter_type='bool',
name=name)
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
| bsd-3-clause |
rodluger/planetplanet | scripts/timing.py | 1 | 4464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
timing.py |github|
------------------
Timing tests for :py:obj:`planetplanet`. Here we compare the time it takes
to compute a transit light curve among :py:obj:`planetplanet`,
:py:obj:`batman`, and :py:obj:`pysyzygy`. :py:obj:`planetplanet` is currently
about 10 times slower than the other two for transits. I'm working on speeding
it up, but the main source of the speed difference is the fact that
:py:obj:`planetplanet` computes wavelength-dependent light curves, which means
the total stellar flux is not unity (as it is in the other two codes); instead,
we need to evaluate the Planck function repeatedly, even when we set the
spectral resolution `R = 1`. It's fairly straightforward to optimize
this, so stay tuned for updates!
.. plot::
:align: center
from scripts import timing
timing._test()
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/timing.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from planetplanet.constants import *
from planetplanet import Star, Planet, System
import matplotlib.pyplot as pl
import numpy as np
try:
import batman
except:
batman = None
try:
import pysyzygy as ps
except:
ps = None
from tqdm import tqdm
import timeit
try:
import builtins
except:
import __builtin__ as builtins
# System params
mstar = 1.
rstar = 1.
limbdark = [0.4, 0.26]
per = 5.
inc = 90.
r = 10.
t0 = 0.
w = 60.
ecc = 0.3
def _test():
'''
'''
plot()
pl.show()
def run_pp(N = 1000):
'''
'''
# planetplanet
time = np.linspace(-0.1, 0.1, N)
star = Star('A', m = mstar, r = rstar, nz = 11, limbdark = limbdark)
b = Planet('b', m = 0., per = per, inc = inc, r = r, t0 = t0,
nz = 1, Omega = 0., w = w, ecc = ecc, phasecurve = False)
system = System(star, b, R = 1, lambda1 = 5, lambda2 = 6, quiet = True,
batmanopt = True, circleopt = True, nbody = False)
system.compute(time)
flux_pp = system.A.flux[:,0]
flux_pp /= flux_pp[0]
return flux_pp
def run_bm(N = 1000):
'''
'''
# batman
time = np.linspace(-0.2, 0.2, N)
params = batman.TransitParams()
params.t0 = t0
params.per = per
params.inc = inc
params.ecc = ecc
params.w = w - 180.
params.limb_dark = "quadratic"
params.u = limbdark
params.rp = r / (rstar * RSUNREARTH)
params.a = ((per) ** 2 * GEARTH * (mstar * MSUNMEARTH) /
(4 * np.pi ** 2)) ** (1. / 3.) / (rstar * RSUNREARTH)
m = batman.TransitModel(params, time)
flux_bm = m.light_curve(params)
return flux_bm
def run_ps(N = 1000):
'''
'''
# pysyzygy
time = np.linspace(-0.2, 0.2, N)
a = ((per) ** 2 * GEARTH * (mstar * MSUNMEARTH) /
(4 * np.pi ** 2)) ** (1. / 3.) / (rstar * RSUNREARTH)
trn = ps.Transit(t0 = t0, per = per, RpRs = r / (rstar * RSUNREARTH),
ecc = ecc, w = w * np.pi / 180 + np.pi, u1 = limbdark[0],
u2 = limbdark[1], bcirc = a * np.cos(inc * np.pi / 180),
aRs = a)
flux_ps = trn(time, 'unbinned')
return flux_ps
def plot():
'''
'''
# Register the functions
builtins.__dict__.update(globals())
# Loop over various dataset sizes
Narr = np.logspace(0, 5, 5)
tpp = np.zeros_like(Narr)
tbm = np.zeros_like(Narr)
tps = np.zeros_like(Narr)
for i, N in enumerate(Narr):
tpp[i] = timeit.timeit('run_pp(%d)' % N, number = 10) / 10.
if batman is not None:
tbm[i] = timeit.timeit('run_bm(%d)' % N, number = 10) / 10.
if ps is not None:
tps[i] = timeit.timeit('run_ps(%d)' % N, number = 10) / 10.
pl.plot(Narr, tpp, '-o', label = 'planetplanet')
if batman is not None:
pl.plot(Narr, tbm, '-o', label = 'batman')
if ps is not None:
pl.plot(Narr, tps, '-o', label = 'pysyzygy')
pl.legend()
pl.yscale('log')
pl.xscale('log')
pl.ylabel('Time [seconds]', fontweight = 'bold')
pl.xlabel('Number of datapoints', fontweight = 'bold')
if __name__ == '__main__':
plot()
pl.show() | gpl-3.0 |
MJuddBooth/pandas | pandas/core/dtypes/cast.py | 1 | 43455 | """ routines for casting """
from datetime import datetime, timedelta
import numpy as np
from pandas._libs import lib, tslib, tslibs
from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, Period, iNaT
from pandas.compat import PY3, string_types, text_type, to_str
from .common import (
_INT64_DTYPE, _NS_DTYPE, _POSSIBLY_CAST_DTYPES, _TD_DTYPE, ensure_int8,
ensure_int16, ensure_int32, ensure_int64, ensure_object, is_bool,
is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype,
is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype, is_datetimelike, is_dtype_equal,
is_extension_array_dtype, is_extension_type, is_float, is_float_dtype,
is_integer, is_integer_dtype, is_object_dtype, is_scalar, is_string_dtype,
is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype,
pandas_dtype)
from .dtypes import (
DatetimeTZDtype, ExtensionDtype, PandasExtensionDtype, PeriodDtype)
from .generic import (
ABCDatetimeArray, ABCDatetimeIndex, ABCPeriodArray, ABCPeriodIndex,
ABCSeries)
from .inference import is_list_like
from .missing import isna, notna
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj):
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj):
if any(isinstance(v, ABCSeries) for v in obj.values):
return True
return False
def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32)
"""
if is_scalar(result):
return result
def trans(x):
return x
if isinstance(dtype, string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(ensure_object(result.ravel()),
skipna=False)
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
def trans(x): # noqa
return x.round()
else:
dtype = 'object'
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if (result.dtype.itemsize <= dtype.itemsize and
np.prod(result.shape)):
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
# if we have any nulls, then we are done
if (isna(arr).any() or
not np.allclose(arr, trans(arr).astype(dtype), rtol=0)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notna(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result, rtol=0):
return new_result
except Exception:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
elif (issubclass(dtype.type, np.floating) and
not is_bool_dtype(result.dtype)):
return result.astype(dtype)
# a datetimelike
# GH12821, iNaT is casted to float
elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']:
try:
result = result.astype(dtype)
except Exception:
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize('utc')
result = result.tz_convert(dtype.tz)
elif dtype.type == Period:
# TODO(DatetimeArray): merge with previous elif
from pandas.core.arrays import PeriodArray
return PeriodArray(result, freq=dtype.freq)
except Exception:
pass
return result
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
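Examples
--------
Illustrative only (inputs are made up); the integer array is upcast to
float so that NaN can be placed under the mask.
>>> arr = np.array([1, 2, 3], dtype=np.int64)
>>> mask = np.array([False, True, False])
>>> result, changed = maybe_upcast_putmask(arr, mask, np.nan)
>>> changed
True
>>> result.dtype
dtype('float64')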
"""
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isna(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except Exception:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isna(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isna(other[mask]).any():
return changeit()
try:
np.place(result, mask, other)
except Exception:
return changeit()
return result, False
def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, np.datetime64):
fill_value = tslibs.Timestamp(fill_value).value
elif issubclass(dtype.type, np.timedelta64):
fill_value = tslibs.Timedelta(fill_value).value
elif is_datetime64tz_dtype(dtype):
if isna(fill_value):
fill_value = NaT
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = iNaT
else:
dtype = np.object_
fill_value = np.nan
else:
dtype = np.object_
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif is_datetime64tz_dtype(dtype):
pass
elif issubclass(np.dtype(dtype).type, string_types):
dtype = np.object_
return dtype, fill_value
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
routine to infer the dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to a pandas extension type is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is
inferred as object
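Examples
--------
A small sketch with assumed inputs.
>>> dtype, val = infer_dtype_from_scalar(3.5)
>>> dtype is np.float64
True
>>> infer_dtype_from_scalar(True)[0] is np.bool_
True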
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternatively we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslibs.Timestamp(val)
if val is tslibs.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslibs.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
def infer_dtype_from_array(arr, pandas_dtype=False):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to a pandas extension type
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
If pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
If pandas_dtype=True, datetime64tz-aware / categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_type(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
"""
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
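# Editor's sketch (not part of the original file): illustrative use of
# maybe_upcast. An int64 array cannot hold the default NaN fill value, so it
# is promoted to float64.
#
#   values, fill_value = maybe_upcast(np.array([1, 2, 3], dtype=np.int64))
#   # values.dtype == np.float64 and np.isnan(fill_value)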
def maybe_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: {dtype}"
.format(dtype=dtype))
def invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - {np.dtype('S').type, np.dtype('<U').type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
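# Editor's sketch (not part of the original file): with fewer than ~127
# categories the codes fit into int8, keeping Categorical memory usage small.
#
#   codes = coerce_indexer_dtype(np.array([0, 1, 2, 1]), categories=[0, 1, 2])
#   # codes.dtype == np.int8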
def coerce_to_dtypes(result, dtypes):
"""
    given dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslibs.Timestamp(r)
elif dtype == _TD_DTYPE:
r = tslibs.Timedelta(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def astype_nansafe(arr, dtype, copy=True, skipna=False):
"""
    Cast the elements of an array to a given dtype in a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype
copy : bool, default True
        If False, a view will be attempted but may fail if,
        e.g., the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(
arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel(),
skipna=skipna).reshape(arr.shape)
elif issubclass(dtype.type, string_types):
return lib.astype_str(arr.ravel(),
skipna=skipna).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == 'M':
return arr.astype(dtype)
raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return tslibs.ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# in py3, timedelta64[ns] are int64
if ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
# we return a float here!
if dtype.kind == 'm':
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == _TD_DTYPE:
return arr.astype(_TD_DTYPE, copy=copy)
raise TypeError("cannot astype a timedelta from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if not np.isfinite(arr).all():
raise ValueError('Cannot convert non-finite values (NA or inf) to '
'integer')
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = ("The '{dtype}' dtype has no unit. "
"Please pass in '{dtype}[ns]' instead.")
raise ValueError(msg.format(dtype=dtype.name))
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
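# Editor's sketch (not part of the original file): two representative cases
# for astype_nansafe.
#
#   astype_nansafe(np.array([1.5, 2.5]), np.dtype(np.int64))
#   # -> array([1, 2]); a plain value-truncating cast
#
#   astype_nansafe(np.array([1.0, np.nan]), np.dtype(np.int64))
#   # -> raises ValueError, because NaN cannot be represented as an integer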
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = maybe_cast_to_datetime(
values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.core.tools.timedeltas import to_timedelta
new_values = to_timedelta(values, errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
except Exception:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
from pandas import to_datetime
return to_datetime(values, errors='coerce', box=False)
elif timedelta:
from pandas import to_timedelta
return to_timedelta(values, errors='coerce', box=False)
elif numeric:
from pandas import to_numeric
return to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
        # GH 20380: a datetime beyond year 2262 falls outside the bounds of
        # nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values,
convert_datetime=datetime)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
            # If all NaNs, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
except Exception:
pass
return values
def maybe_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M':
return is_datetime64_ns_dtype(arr.dtype)
elif kind == 'm':
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
    we might have an array (or single object) that is datetime-like,
    and no dtype is passed; don't change the value unless we find a
    datetime/timedelta set
this is pretty strict in that a datetime/timedelta is REQUIRED
in addition to possible nulls/string likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : boolean, default False
        if True, try really hard to convert dates (such as datetime.date);
        otherwise leave an inferred dtype of 'date' alone
"""
# TODO: why not timedelta?
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex,
ABCDatetimeArray, ABCPeriodArray)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v,
require_iso8601=True,
errors='raise')[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
try:
from pandas._libs.tslibs import conversion
from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize(
'UTC').tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
except Exception:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ['mixed']:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value
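# Editor's sketch (not part of the original file), illustrating the "strict"
# behaviour described in the docstring above; results are descriptive.
#
#   v = np.array([tslibs.Timestamp("2015-01-01"), np.nan], dtype=object)
#   maybe_infer_to_datetimelike(v)
#   # -> datetime64[ns] ndarray (NaT in the second slot), because an actual
#   #    datetime is present alongside the null
#
#   maybe_infer_to_datetimelike(np.array(["2015-01-01"], dtype=object))
#   # -> returned unchanged; strings alone are not enough to trigger conversion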
def maybe_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
if dtype is not None:
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = ("The '{dtype}' dtype has no unit. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
raise ValueError(msg.format(dtype=dtype.name))
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
"dtype [{dtype}]".format(dtype=dtype))
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
raise ValueError(msg.format(dtype=dtype.name))
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
"dtype [{dtype}]".format(dtype=dtype))
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
else:
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value)
value = to_datetime(value, errors=errors).array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = (value.tz_localize('UTC')
.tz_convert(dtype.tz))
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
ints = np.asarray(value).view('i8')
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to {dtype}'
.format(dtype=dtype))
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
        # set up already; we are not an integer/object, so don't bother with this
# conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
value = maybe_infer_to_datetimelike(value)
return value
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.object
return np.find_common_type(types, [])
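# Editor's sketch (not part of the original file): a few representative
# promotions.
#
#   find_common_type([np.dtype("int64"), np.dtype("float32")])
#   # -> float64 (plain numpy promotion)
#
#   find_common_type([np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")])
#   # -> datetime64[ns], via the equal-dtype shortcut above
#
#   find_common_type([np.dtype("bool"), np.dtype("int64")])
#   # -> object, because bool is deliberately not mixed with numeric dtypes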
def cast_scalar_to_array(shape, value, dtype=None):
"""
create np.ndarray of specified shape and dtype, filled with values
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
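# Editor's sketch (not part of the original file): illustrative use of
# cast_scalar_to_array.
#
#   cast_scalar_to_array((2, 3), 5)
#   # -> a 2x3 int64 ndarray filled with 5; the dtype comes from
#   #    infer_dtype_from_scalar when none is given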
def construct_1d_arraylike_from_scalar(value, length, dtype):
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype / np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if is_datetime64tz_dtype(dtype):
from pandas import DatetimeIndex
subarr = DatetimeIndex([value] * length, dtype=dtype)
elif is_categorical_dtype(dtype):
from pandas import Categorical
subarr = Categorical([value] * length, dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
if length and is_integer_dtype(dtype) and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype('float64')
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
            # coerce to object dtype so that numpy takes our string as a
            # scalar value without truncating it to a fixed width
dtype = object
if not isna(value):
value = to_str(value)
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
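# Editor's sketch (not part of the original file): NA forces an integer dtype
# to float, mirroring pandas' usual missing-value semantics.
#
#   construct_1d_arraylike_from_scalar(np.nan, 3, np.dtype("int64"))
#   # -> float64 array of three NaNs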
def construct_1d_object_array_from_listlike(values):
"""
    Transform any list-like object into a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result
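# Editor's sketch (not part of the original file): nested lists stay as
# elements instead of becoming extra dimensions.
#
#   construct_1d_object_array_from_listlike([[1, 2], [3, 4]])
#   # -> 1-D object array of length 2 whose elements are the two lists
#   #    (np.array would instead build a 2x2 integer array)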
def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
    >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
    array(['1.0', '2.0', None], dtype=object)
"""
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
        # including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr
def maybe_cast_to_integer_array(arr, dtype, copy=False):
"""
    Takes any dtype and returns the casted version, raising when data is
incompatible with integer/unsigned integer dtypes.
.. versionadded:: 0.24.0
Parameters
----------
arr : array-like
The array to cast.
dtype : str, np.dtype
The integer dtype to cast the array to.
copy: boolean, default False
Whether to make a copy of the array before returning.
Returns
-------
int_arr : ndarray
An array of integer or unsigned integer dtype
Raises
------
OverflowError : the dtype is incompatible with the data
ValueError : loss of precision has occurred during casting
Examples
--------
If you try to coerce negative values to unsigned integers, it raises:
>>> Series([-1], dtype="uint64")
Traceback (most recent call last):
...
OverflowError: Trying to coerce negative values to unsigned integers
Also, if you try to coerce float values to integers, it raises:
>>> Series([1, 2, 3.5], dtype="int64")
Traceback (most recent call last):
...
ValueError: Trying to coerce float values to integers
"""
try:
if not hasattr(arr, "astype"):
casted = np.array(arr, dtype=dtype, copy=copy)
else:
casted = arr.astype(dtype, copy=copy)
except OverflowError:
raise OverflowError("The elements provided in the data cannot all be "
"casted to the dtype {dtype}".format(dtype=dtype))
if np.array_equal(arr, casted):
return casted
# We do this casting to allow for proper
# data and dtype checking.
#
# We didn't do this earlier because NumPy
# doesn't handle `uint64` correctly.
arr = np.asarray(arr)
if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
raise OverflowError("Trying to coerce negative values "
"to unsigned integers")
if is_integer_dtype(dtype) and (is_float_dtype(arr) or
is_object_dtype(arr)):
raise ValueError("Trying to coerce float values to integers")
| bsd-3-clause |
soravux/deap | doc/conf.py | 9 | 8166 | # -*- coding: utf-8 -*-
#
# DEAP documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 30 13:21:43 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append("..")
#sys.path.append(os.path.abspath('_ext/'))
import deap
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.pngmath', 'sphinx.ext.intersphinx', 'sphinx.ext.extlinks',
'sphinx.ext.viewcode']
try:
import matplotlib
except ImportError:
pass
else:
extensions += ['matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DEAP'
copyright = u'2009-%s, DEAP Project' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = deap.__version__
# The full version, including alpha/beta/rc tags.
release = deap.__revision__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, the todo will be printed in the documentation
todo_include_todos = True
# Search in python documentation
intersphinx_mapping = {'python' : ('http://docs.python.org/', None),
'numpy' : ('http://docs.scipy.org/doc/numpy', None)}
# Reload the cached values every 5 days
intersphinx_cache_limit = 5
# -- Options for pyplot extension ----------------------------------------------
# Default value for the include-source option
plot_include_source = False
# Code that should be executed before each plot.
#plot_pre_code
# Base directory, to which ``plot::`` file names are relative
# to. (If None or empty, file names are relative to the
# directory where the file containing the directive is.)
#plot_basedir
# Whether to show links to the files in HTML.
plot_html_show_formats = True
# -- Options for extlinks extension ----------------------------------------------
import subprocess
try:
tree = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
except OSError:
import warnings
warnings.warn("Cannot link examples because we cannot retrieve the git version", Warning)
else:
extlinks = {'example': ('https://github.com/DEAP/deap/blob/{tree}/examples/%s.py'.format(tree=tree), "examples/")}
# -- Options for HTML output ---------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'classic' and 'alabaster'.
html_theme = 'pydoctheme'
#RTFD.org does not support sphinx 1.3.1 yet.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'collapsiblesidebar': True}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "deap_orange_icon_32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DEAP-doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'DEAP.tex', u'DEAP Documentation',
u'DEAP Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amsmath,amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| lgpl-3.0 |
rsignell-usgs/notebook | xray_test.py | 1 | 1314 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import xray
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
# <codecell>
URL = 'http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/Global_0p5deg/Best'
# <codecell>
ds = xray.open_dataset(URL)
# <codecell>
# select lat,lon region of interest
# note: slice(20.5,55.0) fails
dsloc = ds.sel(lon=slice(230.5,300.0),lat=slice(55.0,20.5))
# <codecell>
# select closest data to time of interest
date = datetime.datetime(2015,7,15,3,0,0)
#date = datetime.datetime.now()
ds_snapshot = dsloc.sel(time1=date,method='nearest')
# <codecell>
# ds.data_vars
# ds.coords
# ds.attrs
# <codecell>
t = ds_snapshot['Temperature_surface']
# <codecell>
t.shape
# <codecell>
plt.pcolormesh(t.lon.data,t.lat.data,t.data)
plt.title(t.name+pd.Timestamp(t.time.values).strftime(': %Y-%m-%d %H:%M:%S %Z %z'));
# <codecell>
# time series closest to specified lon,lat location
ds_series = ds.sel(lon=250.,lat=33.,method='nearest')
# <codecell>
# Select temperature and convert to Pandas Series
v_series = ds_series['Temperature_surface'].to_series()
# <codecell>
v_series.plot(title=v_series.name);
# <codecell>
ds_snapshot.to_netcdf('ds_snapshot.nc')
# <codecell>
ds_snapshot
# <codecell>
| mit |
gfyoung/pandas | pandas/tests/frame/test_logical_ops.py | 2 | 6147 | import operator
import re
import numpy as np
import pytest
from pandas import CategoricalIndex, DataFrame, Interval, Series, isnull
import pandas._testing as tm
class TestDataFrameLogicalOperators:
# &, |, ^
@pytest.mark.parametrize(
"left, right, op, expected",
[
(
[True, False, np.nan],
[True, False, True],
operator.and_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.and_,
[True, False, False],
),
(
[True, False, np.nan],
[True, False, True],
operator.or_,
[True, False, False],
),
(
[True, False, True],
[True, False, np.nan],
operator.or_,
[True, False, True],
),
],
)
def test_logical_operators_nans(self, left, right, op, expected, frame_or_series):
# GH#13896
result = op(frame_or_series(left), frame_or_series(right))
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_logical_ops_empty_frame(self):
# GH#5808
# empty frames, non-mixed dtype
df = DataFrame(index=[1])
result = df & df
tm.assert_frame_equal(result, df)
result = df | df
tm.assert_frame_equal(result, df)
df2 = DataFrame(index=[1, 2])
result = df & df2
tm.assert_frame_equal(result, df2)
dfa = DataFrame(index=[1], columns=["A"])
result = dfa & dfa
expected = DataFrame(False, index=[1], columns=["A"])
tm.assert_frame_equal(result, expected)
def test_logical_ops_bool_frame(self):
# GH#5808
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_bool & df1a_bool
tm.assert_frame_equal(result, df1a_bool)
result = df1a_bool | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
def test_logical_ops_int_frame(self):
# GH#5808
df1a_int = DataFrame(1, index=[1], columns=["A"])
df1a_bool = DataFrame(True, index=[1], columns=["A"])
result = df1a_int | df1a_bool
tm.assert_frame_equal(result, df1a_bool)
# Check that this matches Series behavior
res_ser = df1a_int["A"] | df1a_bool["A"]
tm.assert_series_equal(res_ser, df1a_bool["A"])
def test_logical_ops_invalid(self):
# GH#5808
df1 = DataFrame(1.0, index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'float' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
df1 = DataFrame("foo", index=[1], columns=["A"])
df2 = DataFrame(True, index=[1], columns=["A"])
msg = re.escape("unsupported operand type(s) for |: 'str' and 'bool'")
with pytest.raises(TypeError, match=msg):
df1 | df2
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(
op(df1.values, df2.values), index=df1.index, columns=df1.columns
)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index, columns=df1.columns)
assert result.values.dtype == np.bool_
tm.assert_frame_equal(result, expected)
df1 = {
"a": {"a": True, "b": False, "c": False, "d": True, "e": True},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": False, "b": False, "c": True, "d": False, "e": False},
"d": {"a": True, "b": False, "c": False, "d": True, "e": True},
"e": {"a": True, "b": False, "c": False, "d": True, "e": True},
}
df2 = {
"a": {"a": True, "b": False, "c": True, "d": False, "e": False},
"b": {"a": False, "b": True, "c": False, "d": False, "e": False},
"c": {"a": True, "b": False, "c": True, "d": False, "e": False},
"d": {"a": False, "b": False, "c": False, "d": True, "e": False},
"e": {"a": False, "b": False, "c": False, "d": False, "e": True},
}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
_check_unary_op(operator.inv) # TODO: belongs elsewhere
def test_logical_with_nas(self):
d = DataFrame({"a": [np.nan, False], "b": [True, True]})
# GH4947
# bool comparisons should return bool
result = d["a"] | d["b"]
expected = Series([False, True])
tm.assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d["a"].fillna(False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = d["a"].fillna(False, downcast=False) | d["b"]
expected = Series([True, True])
tm.assert_series_equal(result, expected)
def test_logical_ops_categorical_columns(self):
# GH#38367
intervals = [Interval(1, 2), Interval(3, 4)]
data = DataFrame(
[[1, np.nan], [2, np.nan]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
mask = DataFrame(
[[False, False], [False, False]], columns=data.columns, dtype=bool
)
result = mask | isnull(data)
expected = DataFrame(
[[False, True], [False, True]],
columns=CategoricalIndex(
intervals, categories=intervals + [Interval(5, 6)]
),
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
anurag313/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
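# Editor's sketch (not part of the original file): a small worked example of
# the contingency matrix used by the metrics below.
#
#   contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   # -> array([[1, 1],
#   #           [0, 2]])
#   # row i / column j counts the samples with true class i and predicted
#   # cluster j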
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when the labels are
    permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
    coming from the same classes but with unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
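# Editor's sketch (not part of the original file): splitting one true class
# into two clusters keeps homogeneity at 1.0 but lowers completeness, and the
# V-measure is their harmonic mean.
#
#   homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 2])
#   # -> approximately (1.0, 0.67, 0.80)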
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
    Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
Score between 0.0 and 1.0; 1.0 stands for a perfectly complete labeling.
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
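# Illustrative sanity checks for entropy() above (comments only, not run
# here; values are natural-log entropies, hand-checked):
#   entropy([0, 0, 1, 1])  -> log(2) ~= 0.693
#   entropy([0, 0, 0, 0])  -> 0.0
#   entropy([])            -> 1.0  (degenerate case short-circuited above)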
| bsd-3-clause |
jkarnows/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds := len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
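# Illustrative trace of _binary_clf_curve (comments only, not run here;
# values hand-checked and consistent with the roc_curve doctest below):
#   fps, tps, thr = _binary_clf_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
#   fps -> [0, 1, 1, 2]
#   tps -> [1, 1, 2, 2]
#   thr -> [0.8, 0.4, 0.35, 0.1]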
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If no label is relevant, or all labels are relevant, the score
# is set to 1. The label ranking has no meaning in that case.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
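# Illustrative check of coverage_error (comments only, not run here;
# values hand-computed from the definition above):
#   y_true  = [[1, 0, 0], [0, 1, 1]]
#   y_score = [[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]]
#   coverage_error(y_true, y_score) -> 2.5
#   (sample 1 needs the top 2 scores to cover its label, sample 2 needs all 3)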
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower scores.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
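# Illustrative check of label_ranking_loss (comments only, not run here;
# value hand-computed from the definition above):
#   y_true  = [[1, 0, 0], [0, 0, 1]]
#   y_score = [[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]]
#   label_ranking_loss(y_true, y_score) -> 0.75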
| bsd-3-clause |
yaroslavvb/tensorflow | tensorflow/examples/learn/boston.py | 11 | 1978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = tf.contrib.learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Transform
x_transformed = scaler.transform(x_test)
# Predict and score
y_predicted = list(regressor.predict(x_transformed, as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
natasasdj/OpenWPM | analysis_parallel/07_images_pixels.py | 1 | 6871 | import os
import sqlite3
import pandas as pd
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from statsmodels.distributions.empirical_distribution import ECDF
def thousands(x, pos):
if x>=1e9:
return '%dB' % (x*1e-9)
elif x>=1e6:
return '%dM' % (x*1e-6)
elif x>=1e3:
return '%dK' % (x*1e-3)
else:
return x
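# Example outputs of the formatter above (illustrative, not run here):
#   thousands(500, None)        -> 500
#   thousands(1500, None)       -> '1K'
#   thousands(2500000, None)    -> '2M'
#   thousands(3000000000, None) -> '3B'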
formatter = FuncFormatter(thousands)
def ecdf_for_plot(sample):
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
y = ecdf(x)
return (x,y)
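# Illustrative usage of ecdf_for_plot (comments only, not run here):
#   x, y = ecdf_for_plot(pd.Series([1, 2, 2, 5]))
#   # x is the sample sorted descending: [5, 2, 2, 1]
#   # y is the empirical CDF evaluated at x: [1.0, 0.75, 0.75, 0.25]
#   plt.step(x, y)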
main_dir = '/root/OpenWPM/analysis_parallel/'
res_dir = os.path.join(main_dir,'results')
db = os.path.join(res_dir,'images3.sqlite')
conn3 = sqlite3.connect(db)
query = 'SELECT * FROM Images3'
df3 = pd.read_sql_query(query,conn3)
df3['cont_length'] = pd.to_numeric(df3['cont_length'],errors='coerce',downcast='integer')
s = df3[df3['size']!=df3['cont_length']]
df3['pixels'].max() #178,560,000 #7,393,944,182,842,454,016
df3['pixels'].isnull().sum() #2,797,214 #9033623
df3['pixels'].isnull().sum()/float(df3.shape[0]) #8.8% # 0.092028976013786706
df3['pixels'] = df3['pixels'].fillna(-1).map(int)
fig_dir = os.path.join(main_dir,'figs')
# scatter plot no of pixels vs size with the color showing count of a pixel-size pair
grouped = df3.groupby(['pixels','size'])
pix_size_count = grouped.size().sort_values()
pixels = pix_size_count.index.get_level_values(level='pixels')
size = pix_size_count.index.get_level_values(level='size')
'''
fig,ax=plt.subplots()
plt.scatter(size, pixels, c=pix_size_count,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('count of images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-1,1e8])
plt.xlabel('total no of pixels')
plt.ylabel('file size [bytes]')
#plt.show()
fig.savefig(fig_dir + 'pix_size_count.png',format='png')
fig.savefig(fig_dir + 'pix_size_count.eps',format='eps')
'''
fig,ax=plt.subplots()
plt.scatter(size, pixels, c=pix_size_count/float(df3.shape[0])*100,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('percentage of third-party images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-2,1e18])
fig.canvas.draw()
locs = ax.get_yticks().tolist()
locs = [-1] + locs
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = ['NA'] + labels
ax.set_yticks(locs)
ax.set_yticklabels(labels)
plt.xlabel('file size [bytes]')
plt.ylabel('no of pixels')
fig.savefig(os.path.join(fig_dir,'02a_size_pix_perc.png'),format='png')
# cdf of number of pixels
(x,y) = ecdf_for_plot(df3['pixels'])
(x2,y2) = ecdf_for_plot(s['pixels'])
fig,ax=plt.subplots()
plt.step(x,y,color='blue',label='all')
plt.step(x2,y2 * float(s.shape[0])/float(df3.shape[0]),color = 'red', label =r'size $\neq$ content-length')
plt.legend(loc='upper left')
plt.grid(True)
plt.xscale('symlog')
plt.xlim([-2,1e8])
fig.canvas.draw()
locs = ax.get_xticks().tolist()
locs = [-1] + locs
labels = [item.get_text() for item in ax.get_xticklabels()]
labels = ['NA'] + labels
ax.set_xticks(locs)
ax.set_xticklabels(labels)
plt.xlabel('number of pixels')
plt.ylabel('cdf')
plt.savefig(os.path.join(fig_dir,'02b_pix_cdf.png'))
grouped = df3.groupby('pixels')
s_pix_count = grouped.size()
s_pix_count_ = s_pix_count/float(df3.shape[0])*100
df_pix_count = pd.DataFrame(s_pix_count,columns=['count'])
# count of total number of pixels
fig,ax=plt.subplots()
plt.scatter(s_pix_count.index,s_pix_count,marker='.',color='darkblue')
#s_pix_count_lim = s_pix_count[s_pix_count > 0.0001*df.shape[0]]
#plt.scatter(s_pix_count_lim.index,s_pix_count_lim, marker='.',color='lightblue')
plt.xscale('symlog')
plt.yscale('log')
plt.xlabel('total number of pixels')
plt.ylabel('number of images')
plt.xlim([-1,1e8])
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_count.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_count.eps'),format='eps')
fig,ax=plt.subplots()
plt.scatter(s_pix_count_.index,s_pix_count_,marker='.',color='darkblue')
plt.xlabel('total number of pixels')
plt.ylabel('percentage of total number of images')
plt.xscale('symlog')
plt.yscale('log')
plt.xlim([-1,1e8])
plt.ylim([1e-6,1e2])
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_perc.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_perc.eps'),format='eps')
# Top 20 size counts of images
s_pix_count_sort = s_pix_count.sort_values(ascending=False)
s_pix_perc_sort = s_pix_count_sort/float(df3.shape[0])*100
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_count_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_count_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_count_top20.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_count_top20.eps'),format='eps')
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_perc_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_perc_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_perc_top20.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_perc_top20.eps'),format='eps')
#s=df['size'][df['size']!=df['cont_length']]
#l=s.tolist()
#df['pixels'].fillna(value=-100,inplace=True)
'''
grouped = df.groupby(['pixels','type'])
s_pix_type_count = grouped.size()
df_pix_type_count = pd.DataFrame(s_type_count,columns=['count'])
'''
# top 20 pixel size count
pix_size_count.sort_values(ascending = False,inplace = True)
x=range(1,21)
labels = map(str,[(int(a),int(b)) for (a,b) in pix_size_count.index[0:20]])
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_size_count_top20.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_size_count_top20.eps'),format='eps')
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20]/float(df3.shape[0])*100,align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(os.path.join(fig_dir, 'pix_size_perc_top20.png'),format='png')
fig.savefig(os.path.join(fig_dir, 'pix_size_perc_top20.eps'),format='eps')
| gpl-3.0 |
shikhardb/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers / 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
MasterScrat/ChatShape | cloud.py | 1 | 4775 | #!/usr/bin/env python3
import os
import re
import time
import json
import argparse
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from wordcloud import WordCloud, ImageColorGenerator
from scipy.misc import imread
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', dest='data_paths', nargs='+',
help='chat log data files (pickle files)', required=True)
parser.add_argument('--sw', '--stopwords-paths', dest='stopwords_paths', nargs='+',
help='stopword files (JSON format)', default=['stopwords/en.json'])
parser.add_argument('-m', '--mask-image', dest='mask_image', type=str, default=None,
help='image to use as mask', required=True)
parser.add_argument('--filter-conversation', dest='filter_conversation', type=str, default=None,
help='only keep messages sent in a conversation with these senders, separated by comma')
parser.add_argument('--filter-sender', dest='filter_sender', type=str, default=None,
help='only keep messages sent by these senders, separated by comma')
parser.add_argument('--remove-sender', dest='remove_sender', type=str, default=None,
help='remove messages sent by these senders, separated by comma')
parser.add_argument('-n', '--num-words', dest='num_words', type=int, default=10000, help='maximum number of words to render in the cloud')
parser.add_argument('--density', '--dpi', dest='density', type=int, default=100, help='rendered image DPI')
args = parser.parse_args()
return args
def main():
args = parse_arguments()
num_words = args.num_words
density = args.density
mask_img = args.mask_image
data_paths = args.data_paths
stopwords_paths = args.stopwords_paths
filter_conversation = args.filter_conversation
filter_sender = args.filter_sender
remove_sender = args.remove_sender
print('Mask image:', mask_img)
print('Up to', num_words, 'words on the cloud')
print('Image dpi:', density)
# TODO factor w histo logic
df = pd.DataFrame()
for dataPath in data_paths:
print('Loading messages from %s...' % dataPath)
df = pd.concat([df, pd.read_pickle(dataPath)])
df.columns = ['timestamp', 'conversationId', 'conversationWithName', 'senderName', 'text', 'platform', 'language',
'datetime']
print('Loaded', len(df), 'messages')
# TODO filtering should be in a package-wide function!
if filter_conversation is not None:
filter_conversation = filter_conversation.split(',')
print('Keeping only messages in conversations with', filter_conversation)
df = df[df['conversationWithName'].isin(filter_conversation)]
if filter_sender is not None:
filter_sender = filter_sender.split(',')
print('Keeping only messages sent by', filter_sender)
df = df[df['senderName'].isin(filter_sender)]
if remove_sender is not None:
print('Removing messages sent by', remove_sender)
remove_sender = remove_sender.split(',')
df = df[~df['senderName'].isin(remove_sender)]
if len(df['text']) == 0:
print('No messages left! review your filter options')
exit(0)
print('Final corpus:', len(df['text'])/1000, 'K messages')
stopwords = []
for stopwordsPath in stopwords_paths:
print('Loading stopwords from', stopwordsPath, '...')
stopwords = stopwords + json.load(open(stopwordsPath))
stopwords = set(stopwords)
print('Stopwords:', len(stopwords), 'words')
# pre-compiled regex is faster than going through a list
stopwords = '|'.join(stopwords)
regex = re.compile(r'\b('+stopwords+r')\b', re.UNICODE)
print('Cleaning up...')
text = df['text'] \
.replace(to_replace='None', value=np.nan).dropna() \
.str.lower() \
.apply(lambda w: re.sub(r'^https?:\/\/.*[\r\n]*', '', w)) \
.apply(lambda w: regex.sub('', w))
text = ' '.join(text)
print('Rendering...')
directory = os.path.dirname(__file__)
mask = imread(os.path.join(directory, mask_img))
# https://amueller.github.io/word_cloud/generated/wordcloud.WordCloud.html#wordcloud.WordCloud
wc = WordCloud(background_color="white", mask=mask,
max_words=num_words, stopwords=None, normalize_plurals=False, collocations=True).generate(text)
image_colors = ImageColorGenerator(mask)
wc = wc.recolor(color_func=image_colors)
plt.imshow(wc)
plt.axis("off")
path = 'renders/' + str(int(time.time()*1000)) + '.png'
print('Saving to %s...' % path)
plt.savefig(path, dpi=density)
if __name__ == '__main__':
main()
| mit |
aayushidwivedi01/spark-tk | regression-tests/sparktkregtests/testcases/frames/assign_sample_test.py | 12 | 3247 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test assign sample functionality """
import unittest
import sys
import os
from sparktkregtests.lib import sparktk_test
class AssignSample(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(AssignSample, self).setUp()
schema = [("color1", str), ("predicted", str)]
self.frame = self.context.frame.import_csv(
self.get_file("model_color.csv"), schema=schema)
def test_label_column(self):
"""Test splitting on the label column"""
self.frame.assign_sample(
[0.6, 0.3, 0.1], ['one', 'two', 'three'], 'label_column', 2)
baseline = {'one': 0.6, 'two': 0.3, 'three': 0.1}
self._test_frame_assign('label_column', baseline)
def test_sample_bin(self):
"""Test splitting on the sample_bin column"""
self.frame.assign_sample([0.5, 0.3, 0.2])
baseline = {'TR': 0.5, 'TE': 0.3, 'VA': 0.2}
self._test_frame_assign("sample_bin", baseline)
def test_seed(self):
""" Test seed is default 0, and non-0 is different """
self.frame.assign_sample(
[0.6, 0.2, 0.1, 0.1], output_column="default")
self.frame.assign_sample(
[0.6, 0.2, 0.1, 0.1], seed=0, output_column="seed_0")
self.frame.assign_sample(
[0.6, 0.2, 0.1, 0.1], seed=5, output_column="seed_5")
baseline = {'Sample_0': 0.6,
'Sample_1': 0.2,
'Sample_2': 0.1,
'Sample_3': 0.1}
# Check expected results
self._test_frame_assign("default", baseline)
frame_take = self.frame.take(self.frame.count())
seed_d = [i[2] for i in frame_take]
seed_0 = [i[3] for i in frame_take]
seed_5 = [i[4] for i in frame_take]
# seed=0 and default give the same results.
self.assertEqual(seed_0, seed_d)
# seed=0 and seed=5 give different assignments.
self.assertNotEqual(seed_0, seed_5)
def _test_frame_assign(self, column_name, sample):
"""Tests the assign method on the given column and sample"""
pd = self.frame.to_pandas(self.frame.count())
groupby_rows = pd.groupby(column_name).size()
count = float(groupby_rows.sum())
normalized = groupby_rows.map(lambda x: x/count)
self.assertItemsEqual(normalized.keys(), sample.keys())
for i, j in normalized.iteritems():
self.assertAlmostEqual(sample[i], j, delta=0.1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pravsripad/mne-python | examples/inverse/plot_label_activation_from_stc.py | 62 | 1949 | """
==================================================
Extracting time course from source_estimate object
==================================================
Load a SourceEstimate object from stc files and
extract the time course of activation in
individual labels, as well as in a complex label
formed through merging two labels.
"""
# Author: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
import os
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
os.environ['SUBJECTS_DIR'] = data_path + '/subjects'
meg_path = data_path + '/MEG/sample'
# load the stc
stc = mne.read_source_estimate(meg_path + '/sample_audvis-meg')
# load the labels
aud_lh = mne.read_label(meg_path + '/labels/Aud-lh.label')
aud_rh = mne.read_label(meg_path + '/labels/Aud-rh.label')
# extract the time course for different labels from the stc
stc_lh = stc.in_label(aud_lh)
stc_rh = stc.in_label(aud_rh)
stc_bh = stc.in_label(aud_lh + aud_rh)
# calculate center of mass and transform to mni coordinates
vtx, _, t_lh = stc_lh.center_of_mass('sample')
mni_lh = mne.vertex_to_mni(vtx, 0, 'sample')[0]
vtx, _, t_rh = stc_rh.center_of_mass('sample')
mni_rh = mne.vertex_to_mni(vtx, 1, 'sample')[0]
# plot the activation
plt.figure()
plt.axes([.1, .275, .85, .625])
hl = plt.plot(stc.times, stc_lh.data.mean(0), 'b')[0]
hr = plt.plot(stc.times, stc_rh.data.mean(0), 'g')[0]
hb = plt.plot(stc.times, stc_bh.data.mean(0), 'r')[0]
plt.xlabel('Time (s)')
plt.ylabel('Source amplitude (dSPM)')
plt.xlim(stc.times[0], stc.times[-1])
# add a legend including center-of-mass mni coordinates to the plot
labels = ['LH: center of mass = %s' % mni_lh.round(2),
'RH: center of mass = %s' % mni_rh.round(2),
'Combined LH & RH']
plt.figlegend([hl, hr, hb], labels, 'lower center')
plt.suptitle('Average activation in auditory cortex labels', fontsize=20)
plt.show()
| bsd-3-clause |
chrjxj/zipline | zipline/finance/risk/period.py | 7 | 11932 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
downside_risk,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns, env,
benchmark_returns=None, algorithm_leverages=None):
self.env = env
treasury_curves = env.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = env.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns,
env)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns,
env)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.trading_day_counts = pd.stats.moments.rolling_count(
self.algorithm_returns, self.num_trading_days)
self.mean_algorithm_returns = pd.Series(
index=self.algorithm_returns.index)
for dt, ret in self.algorithm_returns.iteritems():
self.mean_algorithm_returns[dt] = (
self.algorithm_returns[:dt].sum() /
self.trading_day_counts[dt])
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date,
self.env,
)
self.sharpe = self.calculate_sharpe()
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict mapping each risk metric name to its value for the period.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, env):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = env.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
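    # Note (illustrative): calculate_volatility scales the daily standard
    # deviation by sqrt(num_trading_days); e.g. a daily std of 0.01 over a
    # 252-day period gives roughly 0.01 * sqrt(252) ~= 0.159.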
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
mar = downside_risk(self.algorithm_returns,
self.mean_algorithm_returns,
self.num_trading_days)
# Hold on to downside risk for debugging purposes.
self.downside_risk = mar
return sortino_ratio(self.algorithm_period_returns,
self.treasury_period_return,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return zeros (and an empty eigenvalue list).
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
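    # Illustrative sanity check (comment only): if the algorithm exactly
    # tracks the benchmark, Cov(r_a, r_p) equals Var(r_p) and beta is 1.0;
    # an algorithm with constant (e.g. zero) returns has zero covariance
    # with the benchmark and therefore a beta of 0.0.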
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100% or worse: if a
# return is less than or equal to -1.0, math.log(1.0 + r) fails
# because you cannot take the log of a non-positive number
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
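    # Worked example for calculate_max_drawdown (comment only, hand-checked):
    # with daily returns [0.10, -0.20, 0.05] the cumulative log-return peaks
    # at log(1.10); the trough after the -20% day sits log(0.80) below that
    # peak, so the method returns 1 - exp(log(0.80)) = 0.20, a 20% drawdown.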
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
def __getstate__(self):
state_dict = {k: v for k, v in iteritems(self.__dict__)
if not k.startswith('_')}
STATE_VERSION = 3
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 3
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsPeriod saved state \
is too old.")
self.__dict__.update(state)
| apache-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/stats/tests/test_anova.py | 3 | 14617 | from StringIO import StringIO
import numpy as np
from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
from pandas import read_table
kidney_table = StringIO("""Days Duration Weight ID
0.0 1 1 1
2.0 1 1 2
1.0 1 1 3
3.0 1 1 4
0.0 1 1 5
2.0 1 1 6
0.0 1 1 7
5.0 1 1 8
6.0 1 1 9
8.0 1 1 10
2.0 1 2 1
4.0 1 2 2
7.0 1 2 3
12.0 1 2 4
15.0 1 2 5
4.0 1 2 6
3.0 1 2 7
1.0 1 2 8
5.0 1 2 9
20.0 1 2 10
15.0 1 3 1
10.0 1 3 2
8.0 1 3 3
5.0 1 3 4
25.0 1 3 5
16.0 1 3 6
7.0 1 3 7
30.0 1 3 8
3.0 1 3 9
27.0 1 3 10
0.0 2 1 1
1.0 2 1 2
1.0 2 1 3
0.0 2 1 4
4.0 2 1 5
2.0 2 1 6
7.0 2 1 7
4.0 2 1 8
0.0 2 1 9
3.0 2 1 10
5.0 2 2 1
3.0 2 2 2
2.0 2 2 3
0.0 2 2 4
1.0 2 2 5
1.0 2 2 6
3.0 2 2 7
6.0 2 2 8
7.0 2 2 9
9.0 2 2 10
10.0 2 3 1
8.0 2 3 2
12.0 2 3 3
3.0 2 3 4
7.0 2 3 5
15.0 2 3 6
4.0 2 3 7
9.0 2 3 8
6.0 2 3 9
1.0 2 3 10
""")
class TestAnovaLM(object):
@classmethod
def setupClass(cls):
# kidney data taken from JT's course
# don't know the license
kidney_table.seek(0)
cls.data = read_table(kidney_table, sep="\s+")
cls.kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight)',
data=cls.data).fit()
def test_results(self):
Df = np.array([1, 2, 2, 54])
sum_sq = np.array([2.339693, 16.97129, 0.6356584, 28.9892])
mean_sq = np.array([2.339693, 8.485645, 0.3178292, 0.536837])
f_value = np.array([4.358293, 15.80674, 0.5920404, np.nan])
pr_f = np.array([0.0415617, 3.944502e-06, 0.5567479, np.nan])
results = anova_lm(self.kidney_lm)
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, sum_sq, 4)
np.testing.assert_almost_equal(results['F'].values, f_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, pr_f)
class TestAnovaLMCompare(TestAnovaLM):
def test_results(self):
new_model = ols("np.log(Days+1) ~ C(Duration) + C(Weight)",
self.data).fit()
results = anova_lm(new_model, self.kidney_lm)
Res_Df = np.array([
56, 54
])
RSS = np.array([
29.62486, 28.9892
])
Df = np.array([
0, 2
])
Sum_of_Sq = np.array([
np.nan, 0.6356584
])
F = np.array([
np.nan, 0.5920404
])
PrF = np.array([
np.nan, 0.5567479
])
np.testing.assert_equal(results["df_resid"].values, Res_Df)
np.testing.assert_almost_equal(results["ssr"].values, RSS, 4)
np.testing.assert_almost_equal(results["df_diff"].values, Df)
np.testing.assert_almost_equal(results["ss_diff"].values, Sum_of_Sq)
np.testing.assert_almost_equal(results["F"].values, F)
np.testing.assert_almost_equal(results["Pr(>F)"].values, PrF)
class TestAnova2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
3.067066, 13.27205, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F_value = np.array([
5.667033, 12.26141, 0.1760025, np.nan
])
PrF = np.array([
0.02106078, 4.487909e-05, 0.8391231, np.nan
])
results = anova_lm(anova_ii, typ="II")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC0(TestAnovaLM):
#NOTE: R doesn't return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.972744, 13.7804, 0.1709936, np.nan
])
PrF = np.array([
0.01095599, 1.641682e-05, 0.8433081, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.238771, 12.32983, 0.1529943, np.nan
])
PrF = np.array([
0.01576555, 4.285456e-05, 0.858527, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.267499, 12.25354, 0.1501224, np.nan
])
PrF = np.array([
0.01554009, 4.511826e-05, 0.8609815, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
5.633786, 10.89842, 0.1317223, np.nan
])
PrF = np.array([
0.02142223, 0.0001145965, 0.8768817, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F_value = np.array([
279.7545, 5.367071, 12.43245, 0.1760025, np.nan
])
PrF = np.array([
2.379855e-22, 0.02457384, 3.999431e-05, 0.8391231, np.nan
])
results = anova_lm(anova_iii, typ="III")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC0(TestAnovaLM):
#NOTE: R doesn't return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
298.3404, 5.723638, 13.76069, 0.1709936, np.nan
])
PrF = np.array([
5.876255e-23, 0.02046031, 1.662826e-05, 0.8433081, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
266.9361, 5.12115, 12.3122, 0.1529943, np.nan
])
PrF = np.array([
6.54355e-22, 0.02792296, 4.336712e-05, 0.858527, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
264.5137, 5.074677, 12.19158, 0.1501224, np.nan
])
PrF = np.array([
7.958286e-22, 0.02860926, 4.704831e-05, 0.8609815, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
234.4026, 4.496996, 10.79903, 0.1317223, np.nan
])
PrF = np.array([
1.037224e-20, 0.03883841, 0.0001228716, 0.8768817, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb-failure'], exit=False)
| apache-2.0 |
kipohl/ncanda-data-integration | scripts/reporting/mri_dvd_burning_script.py | 4 | 3005 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
mri_dvd_burning_script
======================
Generate a list of eids for a special subset of subjects. This list can be used
in script/xnat/check_object_names.
"""
import os
import sys
import csv
import redcap
import pandas as pd
import shutil
fields = ['study_id', 'redcap_event_name','exclude', 'visit_ignore',
'visit_date','mri_missing', 'mri_xnat_sid','mri_series_t1',
'mri_series_t2'];
forms=['mr_session_report', 'visit_date',
'demographics']
csv_dir="/fs/u00/alfonso/Desktop"
csv_file="{}/bart_list.csv".format(csv_dir)
def get_project_entry(args=None):
"""
Pulls the data from REDCap
"""
# Get API key.
summary_key_file = open(os.path.join(os.path.expanduser("~"),
'.server_config',
'redcap-dataentry-token'), 'r')
summary_api_key = summary_key_file.read().strip()
# Connect to API.
project_entry = redcap.Project('https://ncanda.sri.com/redcap/api/',
summary_api_key, verify_ssl=False)
return project_entry
def data_entry_fields(fields,project,arm):
"""
Gets the dataframe containing a specific arm from REDCap
"""
# Get a dataframe of fields
data_entry_raw = project.export_records(fields=fields, forms = forms,
format='df', events=arm)
    return data_entry_raw
def get_session_scan(scan_field):
session_scan = []
for i in scan_field:
session_scan.append(i.split('/'))
return session_scan
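# Illustrative only (the exact 'mri_series_*' value format is an assumption):
# a field such as 'NCANDA_E01234/5' would split into ['NCANDA_E01234', '5'],
# and element [0] is what main() below collects as the XNAT experiment id (eid).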
def main(args):
project_entry = get_project_entry()
project_df = data_entry_fields(fields,project_entry,['baseline_visit_arm_1',
'1y_visit_arm_1'])
## Generate Subject List from csv
with open(csv_file, 'rb') as f:
reader = csv.reader(f)
subject_list = []
for i in reader:
subject_list.append(i[0])
# Filter
filter_df = project_df[project_df['mri_xnat_sid'].isin(subject_list)]
# Make eid text file
session_scan_t1 = get_session_scan(filter_df['mri_series_t1'])
session_scan_t2 = get_session_scan(filter_df['mri_series_t2'])
eids = []
for i in session_scan_t1:
eids.append(i[0])
for i in session_scan_t2:
eids.append(i[0])
eids = set(eids)
with open("{}/eidlist2.txt".format(csv_dir), 'w') as file:
file.write("\n".join(eids))
csv_for_dolf = filter_df['mri_xnat_sid']
csv_for_dolf.to_csv('{}/bart_list_with_subject_ids.csv'.format(csv_dir))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v','--visit',choices=['baseline_visit_arm_1','1y_visit_arm_1'],default='baseline_visit_arm_1')
argv = parser.parse_args()
sys.exit(main(args=argv))
| bsd-3-clause |
carltoews/tennis | data/tennis_atp-master/examples/Pandas/AverageTop100Age/pandas_demo.py | 1 | 1449 | #!/usr/bin/env python
import pandas as pd
import glob
import matplotlib.pyplot as plt
import datetime, sys
pd.options.display.mpl_style = 'default'
def parse(t):
string_ = str(t)
try:
        return datetime.date(int(string_[:4]), int(string_[4:6]), int(string_[6:]))
except:
        print "Error", len(string_)
return datetime.date(1900,1,1)
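# Illustrative example (assuming the raw dates are 8-digit YYYYMMDD integers):
# parse(20140105) -> datetime.date(2014, 1, 5)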
def readAllFiles(dirname):
allFiles = glob.glob(dirname + "/atp_rankings_" + "*.csv")
ranks = pd.DataFrame()
list_ = list()
for filen in allFiles:
print filen
df = pd.read_csv(filen,
index_col=None,
header=None,
parse_dates=[0],
date_parser=lambda t:parse(t))
list_.append(df)
ranks = pd.concat(list_)
return ranks
def readPlayers(dirname):
print ("Reading Players")
return pd.read_csv(dirname+"/atp_players.csv",
index_col=None,
header=None,
parse_dates=[4],
date_parser=lambda t:parse(t))
ranks = readAllFiles(sys.argv[1])
ranks = ranks[(ranks[1]<100)]
print ranks
players = readPlayers (sys.argv[1])
plRanks = ranks.merge(players,right_on=0,left_on=2)
plRanks["B"] = plRanks["0_x"] - plRanks[4]
plRanks["B"] = plRanks["B"].astype(int) / (365*24*3600*1000000000.0)
agg = plRanks[["0_x","B"]].groupby("0_x")
agg.mean().to_csv("top100ages.csv")
| gpl-3.0 |
alexandrebarachant/mne-python | examples/forward/plot_forward_sensitivity_maps.py | 12 | 2425 | """
================================================
Display sensitivity maps for EEG and MEG sensors
================================================
Sensitivity maps can be produced from forward operators that
indicate how well different sensor types will be able to detect
neural currents from different regions of the brain.
To get started with forward modeling see :ref:`tut_forward`.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path + '/subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname, surf_ori=True)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
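# (rows = sensors/channels, columns = source components; for a free-orientation
#  forward operator there are typically three columns per source location)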
###############################################################################
# Compute sensitivity maps
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
###############################################################################
# Show gain matrix a.k.a. leadfield matrix with sensitivity map
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
plt.colorbar(im, ax=ax, cmap='RdBu_r')
plt.show()
plt.figure()
plt.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
plt.legend()
plt.title('Normal orientation sensitivity')
plt.xlabel('sensitivity')
plt.ylabel('count')
plt.show()
grad_map.plot(time_label='Gradiometer sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[0, 50, 100]))
| bsd-3-clause |
OpnSrcConstruction/OSCbashRCs | .ipython/profile_default/ipython_config.py | 2 | 23357 | # Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
#c.InteractiveShellApp.exec_lines = []
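## Example (hypothetical values, left commented out like the rest of this file):
#c.InteractiveShellApp.exec_lines = ['import numpy as np', 'import pandas as pd']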
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
#c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = "Python 3.5.2 (default, Nov 23 2017, 16:37:01) \nType 'copyright', 'credits' or 'license' for more information\nIPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 3 (if you provide a value
# less than 3, it is reset to 0 and a warning is issued). This limit is defined
# because otherwise you'll spend more time re-flushing a too small cache than
# working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
#c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
#c.TerminalInteractiveShell.editor = 'nano'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Provide an alternative handler to be called when the user presses Return. This
# is an advanced option intended for debugging, which may be changed or removed
# in later releases.
#c.TerminalInteractiveShell.handle_return = None
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# autumn, native, fruity, pastie, monokai, paraiso-light, friendly, emacs, vim, algol, bw, rrt, rainbow_dash, colorful, igor, perldoc, manni, algol_nu, xcode, borland, default, murphy, paraiso-dark, vs, arduino, abap, trac, tango, lovelace
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
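## Example (hypothetical override; uncommenting it would also require
## `from pygments.token import Token` near the top of this file):
#c.TerminalInteractiveShell.highlighting_style_overrides = {Token.Prompt: '#009900'}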
## Enable mouse support in the prompt (Note: prevents selecting text with the
# mouse)
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of line at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Customize the terminal title format. This is a python format string.
# Available substitutions are: {cwd}.
#c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# end but not the back ticks), to avoid creating an history file.
#c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format an given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
#c.Completer.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
#c.Completer.debug = False
## Activate greedy completion PENDING DEPRECATION. This is now mostly taken care
# of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types. Set to 0 to stop computing types. Non-zero value lower than 100ms may
# hurt performance by preventing jedi to build its cache.
#c.Completer.jedi_compute_type_timeout = 400
## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is
# installed
#c.Completer.use_jedi = True
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
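## Example (hypothetical interpreter name and path): register a %%node cell
## magic backed by a binary that is not on $PATH.
#c.ScriptMagics.script_magics = ['node']
#c.ScriptMagics.script_paths = {'node': '/opt/node/bin/node'}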
#------------------------------------------------------------------------------
# LoggingMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics related to all logging machinery.
## Suppress output of log state when logging is enabled
#c.LoggingMagics.quiet = False
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
| unlicense |
ebressert/ScipyNumpy_book_examples | python_examples/scikits_412_ex2.py | 2 | 1271 | import numpy as np
import pyfits
import skimage.morphology as morph
import skimage.exposure as skie
import matplotlib.pyplot as plt
# Loading astronomy image from an infrared space telescope
img = pyfits.getdata('stellar_cluster.fits')[500:1500, 500:1500]
# Prep the image for the scikit-image environment and plotting
limg = np.arcsinh(img)
limg = limg / limg.max()
low = np.percentile(limg, 0.25)
high = np.percentile(limg, 99.5)
opt_img = skie.exposure.rescale_intensity(limg, in_range=(low, high))
# Calculating local maxima and filtering out noise
lm = morph.is_local_maximum(limg)
x1, y1 = np.where(lm.T == True)
v = limg[(y1, x1)]
lim = 0.5
x2, y2 = x1[v > lim], y1[v > lim]
# Creating figure to show local maximum detection
# rate success
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
ax1 = fig.add_subplot(121)
ax1.imshow(opt_img)
ax1.set_xlim(0, img.shape[1])
ax1.set_ylim(0, img.shape[0])
ax1.xaxis.set_visible(False)
ax1.yaxis.set_visible(False)
ax2 = fig.add_subplot(122)
ax2.imshow(opt_img)
ax2.scatter(x2, y2, s=80, facecolor='none', edgecolor='#FF7400')
ax2.set_xlim(0, img.shape[1])
ax2.set_ylim(0, img.shape[0])
ax2.xaxis.set_visible(False)
ax2.yaxis.set_visible(False)
fig.savefig('scikits_412_ex2.pdf', bbox_inches='tight')
| mit |
sly-ninja/python_for_ml | Module5/assignment2.py | 1 | 5895 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
def showandtell(title=None):
if title != None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300)
plt.show()
# exit()
#
# INFO: This dataset has call records for 10 users tracked over the course of 3 years.
# Your job is to find out where the users likely live and work at!
#
# TODO: Load up the dataset and take a peek at its head
# Convert the date using pd.to_datetime, and the time using pd.to_timedelta
#
df = pd.read_csv('Datasets/CDR.csv')
df['CallDate'] = pd.to_datetime( df['CallDate'] )
df['CallTime'] = pd.to_timedelta( df['CallTime'])
#
# TODO: Get a distinct list of "In" phone numbers (users) and store the values in a
# regular python list.
# Hint: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tolist.html
#
import numpy as np
u = np.unique(df['In'])
unique_nums = u.tolist()
#
# TODO: Create a slice called user1 that filters to only include dataset records where the
# "In" feature (user phone number) is equal to the first number on your unique list above;
# that is, the very first number in the dataset
#
user1 = df[df['In'] == unique_nums[0]]
# INFO: Plot all the call locations
user1.plot.scatter(x='TowerLon', y='TowerLat', c='gray', marker='o', alpha=0.1, title='Call Locations')
# showandtell() # Comment this line out when you're ready to proceed
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This
# is where domain expertise comes into play. Your intuition tells you that people are likely
# to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#
# TODO: Add more filters to the user1 slice you created. Add bitwise logic so that you're
# only examining records that came in on weekends (sat/sun).
#
user1 = user1[(user1['DOW'] == 'Sat') | (user1['DOW'] == 'Sun')]
#
# TODO: Further filter it down for calls that are came in either before 6AM OR after 10pm (22:00:00).
# You can use < and > to compare the string times, just make sure you code them as military time
# strings, eg: "06:00:00", "22:00:00": https://en.wikipedia.org/wiki/24-hour_clock
#
# You might also want to review the Data Manipulation section for this. Once you have your filtered
# slice, print out its length:
#
user1a = user1[('06:00:00' > user1['CallTime']) | (user1['CallTime'] > '22:00:00')]
#
# INFO: Visualize the dataframe with a scatter plot as a sanity check. Since you're familiar
# with maps, you know well that your X-Coordinate should be Longitude, and your Y coordinate
# should be the tower Latitude. Check the dataset headers for proper column feature names.
# https://en.wikipedia.org/wiki/Geographic_coordinate_system#Geographic_latitude_and_longitude
#
# At this point, you don't yet know exactly where the user is located just based off the cell
# phone tower position data; but considering the below are for Calls that arrived in the twilight
# hours of weekends, it's likely that wherever they are bunched up is probably near where the
# caller's residence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1a.TowerLon, user1a.TowerLat, c='g', marker='o', alpha=0.2)
# user1.plot.scatter(user1.TowerLon, user1.TowerLat, c='gray', alpha=0.1, title='Weekend Twilight Calls')
# showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Run K-Means with a K=1. There really should only be a single area of concentration. If you
# notice multiple areas that are "hot" (multiple areas the usr spends a lot of time at that are FAR
# apart from one another), then increase K=2, with the goal being that one of the centroids will
# sweep up the annoying outliers; and the other will zero in on the user's approximate home location.
# Or rather the location of the cell tower closest to their home.....
#
# Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other
# data is suitable for your purposes. Since both Lat and Lon are (approximately) on the same scale,
# no feature scaling is required. Print out the centroid locations and add them onto your scatter
# plot. Use a distinguishable marker and color.
#
# Hint: Make sure you graph the CORRECT coordinates. This is part of your domain expertise.
#
from sklearn.cluster import KMeans
user1b = user1a[['TowerLon', 'TowerLat']]
model = KMeans(n_clusters = 7)
model.fit(user1b)
#
# INFO: Print and plot the centroids...
centroids = model.cluster_centers_
ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169)
print('centroids:', centroids)
# showandtell() # TODO: Comment this line out when you're ready to proceed
#
# TODO: Repeat the above steps for all 10 individuals, being sure to record their approximate home
# locations. You might want to use a for-loop, unless you enjoy typing.
#
for index,item in enumerate(unique_nums):
user = df[df['In'] == unique_nums[index]]
user = user[('06:00:00' > user['CallTime']) | (user['CallTime'] > '22:00:00')]
user = user[(user['DOW'] == 'Sat') | (user['DOW'] == 'Sun')]
user = user[['TowerLon', 'TowerLat']]
model = KMeans(n_clusters = 7)
model.fit(user)
centroids = model.cluster_centers_
ax.scatter(centroids[:,0], centroids[:,1], marker='x', c='blue', alpha=0.5, linewidths=3, s=169)
print(item, centroids)
| mit |
eclee25/flu-SDI-exploratory-age | scripts/OR_fluweeks.py | 1 | 3119 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 2/13/14
###Function: draw OR by week for flu weeks
###Import data:
###Command Line: python
##############################################
### notes ###
### packages/modules ###
import csv
import numpy as np
import matplotlib.pyplot as plt
import sys
## local modules ##
import ORgenerator as od
### data structures ###
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
ilidict, wkdict = {}, {} # unnecessary
# ORdict[week] = OR
# ARdict[week] = attack rate per 10000
ORdict, ARdict = {}, {}
### parameters ###
USchild = 20348657 + 20677194 + 22040343 #US child popn from 2010 Census
USadult = 21585999 + 21101849 + 19962099 + 20179642 + 20890964 + 22708591 + 22298125 + 19664805 #US adult popn from 2010 Census
seasons = range(1,11) #seasons for which ORs will be generated
### plotting settings ###
colorvec = ['grey', 'black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
labelvec = ['00-01', '01-02', '02-03', '03-04', '04-05', '05-06', '06-07', '07-08', '08-09', '09-10']
xlabels = range(40,54)
xlabels.extend(range(1,40))
### functions ###
### import data ###
datain=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks.csv','r')
data=csv.reader(datain, delimiter=',')
### program ###
# OR by week chart
# ilidict[(week, age marker)] = ILI
# wkdict[week] = seasonnum
# weeks = unique list of weeks for dataset
ilidict, wkdict, weeks = od.import_dwk(data, 0, 1, 2, 3)
ORdict, ARdict = od.ORgen_wk(ilidict, weeks)
for s in seasons:
# wkdummy will represent list of weeks for chart in season to use as key for OR dict
wkdummy = [key for key in sorted(weeks) if wkdict[key] == int(s)]
wkdummy = set(wkdummy)
if s == 1:
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
chartwks = xrange(13, 13 + len(sorted(wkdummy)))
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
elif len(wkdummy) == 53:
# wkdummy needs to be sorted bc dict values don't have order
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
chartwks = xrange(len(sorted(wkdummy)))
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
else:
chartORs = [ORdict[wk] for wk in sorted(wkdummy)]
avg53 = (chartORs[12] + chartORs[13])/2
chartORs.insert(13, avg53)
chartwks = xrange(len(sorted(wkdummy)) + 1)
print "season number and num weeks", s, len(wkdummy)
plt.plot(chartwks, chartORs, marker = 'o', color = colorvec[s-1], label = labelvec[s-1], linewidth = 2)
# plt.plot([33, 33], [0, 10], color = 'k', linewidth = 1)
plt.xlim([0, 35])
plt.ylim([0, 10])
plt.xlabel('Week Number', fontsize=24) # 12/1/13 increase size
plt.ylabel('OR, child:adult', fontsize=24)
# plt.ylabel('OR, US pop normalized', fontsize=24)
plt.legend(loc = 'upper left')
plt.xticks(xrange(1, 35), xlabels[:36])
plt.show()
| mit |
kwschultz/vq | PyVQ/pyvq/betas/quick_look.py | 2 | 10227 | import matplotlib
#
import numpy
import math
import pylab as plt
import h5py
import itertools
#
#plt.ion()
default_events = 'vq_output_hattonsenvy_3k/events_3000_d.h5'
events_2 = 'ca_model_hattonsenvy_105yrs_3km/events_3000.hdf5'
def quick_figs(vc_data_file=default_events, fnum_0=0, events_start=0, events_end=None, m0=7.0):
# make some quick figures for preliminary analysis.
with h5py.File(vc_data_file, 'r') as vc_data:
#
events = vc_data['events']
#
if events_start==None: events_start=0
if events_end==None: events_end=len(events)-1
events = events[events_start:events_end]
#
print "get magnitudes and then sort..."
mags = sorted(events['event_magnitude'].tolist())
#
print "get delta_ts..."
T=events['event_year']
#dts = [[t, t - f['events'][j]['event_year']] for j,t in enumerate(f['events']['event_year'])]
dts = [[t, t - T[j]] for j,t in enumerate(T[1:])]
#
print "... and bigmags "
big_mags = [[rw['event_year'], rw['event_magnitude']] for rw in events if rw['event_magnitude']>=m0]
big_mag_dts = [[rw[0], rw[0]-big_mags[j][0]] for j, rw in enumerate(big_mags[1:])]
#
print "Some summary stats:"
mean_dt_m0 = numpy.mean(zip(*big_mag_dts)[1])
std_dt_m0 = numpy.std(zip(*big_mag_dts)[1])
print "mean interval (N=%d) for m>%f: %f +/- %f" % (len(big_mags), m0, mean_dt_m0, std_dt_m0)
#
print "and now plot..."
#
figs=[]
figs+=[plt.figure(len(figs)+fnum_0)]
plt.clf()
#
# first: magnitude distributions
f=figs[-1]
ax = plt.gca()
ax.set_yscale('log')
#ax.plot(mags, reversed(xrange(1, len(mags)+1)), '.-')
ax.plot(*zip(*[[m,len(mags)-j] for j,m in enumerate(mags)]), color='b', marker='.', ls='-', zorder=4, label='Cumulative $N(>m)$')
# and the pdf...
dolog=True
ax.hist(mags,bins=200, range=[min(mags), max(mags)], log=dolog, histtype='step', label='Prob. Density')
plt.legend(loc=0, numpoints=1)
plt.title('Magnitudes')
#
# magnitudes PDF only.
'''
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
dolog=True
ax.hist(mags,bins=200, range=[min(mags), max(mags)], log=dolog)
plt.title('Magnitudes (pdf)')
'''
#
# intervals, magnitudes time series:
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
ldT = numpy.log10(zip(*dts)[1])
ax.set_yscale('log')
#ax.plot(T[1:], ldT, marker='.', ls='-', color='b', label='dt(t)')
ax.plot(T[1:], zip(*dts)[1], marker='.', ls='-', color='b', zorder=8, label='$dt(t)$')
ave_len = 100
print "plot mean intervals over %d intervals(%d events).(%d)" % (ave_len, ave_len+1, len(figs))
ax.plot(T[ave_len:], [(t-T[j])/float(ave_len) for j,t in enumerate(T[ave_len:])], color = 'c', lw=2,zorder=11, label='$<dt(t)>_{%d}$' % ave_len)
# set up dt range:
dts_sorted = sorted(zip(*dts)[1])
#
#print "dt_max at: %f (%d)" % (dt_max, int(.9*len(dts_sorted)))
ax.set_ylim(.9*min(zip(*dts)[1]), 1.1*max(zip(*dts)[1]))
ax.set_ylabel('Intervals $\\Delta t$')
#ax.draw()
ax_mags = ax.twinx()
#ax.vlines(*(zip(*big_mags)),[3.0 for x in big_mags], color='r')
ax_mags.vlines(*(zip(*big_mags)), ymax=[3.0 for x in big_mags], color='r', lw=1.25, zorder=2, label='m>%.2f' % m0)
ax_mags.vlines(T,[3.0 for m in mags], events['event_magnitude'], color='g', zorder=3, label='magnitudes')
ax_mags.set_ylim(2.0, 9.5)
ax_mags.set_ylabel('magnitude')
plt.legend(loc=0, numpoints=1)
#
# big-mag intervals:
# big_mag_dts
print "... big-mag time-series:"
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
ax.set_yscale('log')
ax.set_ylabel('interval $\\Delta t_{m%.2f}$' % m0)
ax.plot(zip(*big_mag_dts)[0], zip(*big_mag_dts)[1], 'g.-', zorder=7, lw=1.5, label='$m>%.2f intervals')
ax_mags = ax.twinx()
ax_mags.vlines(*(zip(*big_mags)), ymax=[3.0 for x in big_mags], color='m', lw=1, zorder=1, label='m>%.2f' % m0, alpha=.5)
#plt.legend(loc=0, numpoints=1)
plt.title('big-mag and intervals')
#
# interval distributions:
#
figs+=[plt.figure(len(figs)+fnum_0)]
f=figs[-1]
f.clf()
ax=f.gca()
dolog=True
normed = False
X = numpy.log10(dts_sorted)
ax.hist(X, bins=200, range=[min(X), max(X)], log=dolog, histtype='stepfilled', normed=normed)
h_cum = ax.hist(X, bins=200, range=[min(X), max(X)], log=dolog, histtype='step', cumulative=True, normed=normed)
N = float(len(X))
if normed: N=1.0
ax.plot([.5*(x+h_cum[1][j]) for j,x in enumerate(h_cum[1][1:])], [N-x for x in h_cum[0]], 'c-')
#ax.plot([x for j,x in enumerate(h_cum[1][:-1])], h_cum[0], 'c-')
        plt.title('intervals distribution (hist)')
plt.xlabel('log intervals $\\log \left( \\Delta t \\right)$')
plt.ylabel('N(dt)')
return h_cum
#
#def plot_recurrence(
class Sweep(object):
def __init__(self, event_number=0, vc_data_file=default_events, block_id=None):
self.sweep_sequences=sweep_sequence(event_number=event_number, block_id=block_id, vc_data_file=vc_data_file)
self.shear_stress_sequences = shear_stress_sequence(sweepses=self.sweep_sequences, do_print=False)
#
b_id_list = self.sweep_sequences['block_id'].tolist()
self.block_ids = {x:b_id_list.count(x) for x in b_id_list}
#self.block_ids = list(set(self.sweep_sequences['block_id'].tolist()))
#
# we could also, at this point, parse out the individual block sequences, maybe make a class Block().
#
def plot_slips(self, block_ids=None, fignum=0):
#if block_ids==None: block_ids=self.block_ids.keys()
#if isinstance(block_ids, float): block_ids=[int(block_ids)]
#if isinstance(block_ids, int): block_ids = [block_ids]
if block_ids==None: block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
plt.clf()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
plt.plot(rws['sweep_number'], rws['block_slip'], '.-', label='block_id: %d' % block_id)
plt.legend(loc=0, numpoints=1)
plt.title('Block slip sequences')
plt.xlabel('sweep number')
plt.ylabel('slip')
#
def plot_stress_drop(self, block_ids=None, fignum=0):
block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
plt.clf()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
plt.plot(rws['sweep_number'], rws['shear_diff'], '.-', label='block_id: %d' % block_id)
plt.plot([min(self.shear_stress_sequences['sweep_number']), max(self.shear_stress_sequences['sweep_number'])], [0., 0.], 'k-')
plt.legend(loc=0, numpoints=1)
plt.title('Block shear_stress drop sequences')
plt.xlabel('sweep number')
plt.ylabel('shear stress drop')
#
def plot_stress(self, block_ids=None, fignum=0):
block_ids = self.check_block_ids_list(block_ids)
#
plt.figure(fignum)
ax1=plt.gca()
plt.clf()
plt.figure(fignum)
plt.clf()
ax0=plt.gca()
#
for block_id in block_ids:
rws = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, self.shear_stress_sequences)), dtype=self.shear_stress_sequences.dtype)
stress_seq = []
for rw in rws:
stress_seq += [[rw['sweep_number'], rw['shear_init']]]
stress_seq += [[rw['sweep_number'], rw['shear_final']]]
X,Y = zip(*stress_seq)
#
ax0.plot(X,Y, '.-', label='block_id: %d' % block_id)
#
plt.figure(fignum+1)
plt.plot(rws['sweep_number'], rws['shear_init'], '.-', label='block_id: %d' % block_id)
plt.plot(rws['sweep_number'], rws['shear_final'], '.-', label='block_id: %d' % block_id)
plt.figure(fignum)
ax0.plot([min(self.shear_stress_sequences['sweep_number']), max(self.shear_stress_sequences['sweep_number'])], [0., 0.], 'k-')
ax0.legend(loc=0, numpoints=1)
plt.figure(fignum)
plt.title('Block shear_stress sequences')
plt.xlabel('sweep number')
plt.ylabel('shear stress')
#
def check_block_ids_list(self, block_ids):
if block_ids==None: block_ids=self.block_ids.keys()
if isinstance(block_ids, float): block_ids=[int(block_ids)]
if isinstance(block_ids, int): block_ids = [block_ids]
#
return block_ids
#
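# Example usage (hypothetical event number; assumes the default events file exists):
# sw = Sweep(event_number=100)
# sw.plot_slips(fignum=1)
# sw.plot_stress_drop(fignum=2)
# sw.plot_stress(fignum=3)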
def shear_stress_sequence(block_id=None, event_number=0, vc_data_file=default_events, do_print=True, sweepses=None):
if sweepses==None: sweepses = sweep_sequence(block_id=block_id, event_number=event_number, vc_data_file=vc_data_file)
#
outsies = [[rw['sweep_number'], rw['block_id'], rw['block_slip'], rw['shear_init'], rw['shear_final'], rw['shear_init']-rw['shear_final'], (rw['shear_init']-rw['shear_final'])/rw['shear_final']] for rw in sweepses]
#
if do_print:
for rw in outsies: print rw
#
cols = ['sweep_number', 'block_id', 'block_slip', 'shear_init', 'shear_final', 'shear_diff', 'shear_diff_norm']
#outsies = numpy.core.records.fromarrays(zip(*outsies), names=cols, formats = [type(x).__name__ for x in outsies[0]])
#return outsies
return numpy.core.records.fromarrays(zip(*outsies), names=cols, formats = [type(x).__name__ for x in outsies[0]])
#
def sweep_sequence(event_number=0, block_id=None, vc_data_file=default_events):
# sweep sequence for a single block in a single event.
#
with h5py.File(vc_data_file) as vc_data:
sweep_range = [vc_data['events'][event_number]['start_sweep_rec'], vc_data['events'][event_number]['end_sweep_rec']]
sweeps = vc_data['sweeps'][sweep_range[0]:sweep_range[1]][()]
#
# so we could filter out all the blocks != block_id, but let's just assume that we (might) want all the blocks (for default None value).
#if block_id==None or block_id not in (sweeps['block_id']): block_id=sweeps['block_id'][0]
if block_id!=None:
d_type = sweeps.dtype
#sweeps = filter(lambda x: x['block_id']==block_id, sweeps)
sweeps = numpy.core.records.fromarrays(zip(*filter(lambda x: x['block_id']==block_id, sweeps)), dtype=d_type)
#
return sweeps
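# Hedged usage sketch (event number, block id and file contents are illustrative only):
#   >>> sweeps = sweep_sequence(event_number=123, block_id=42)
# returns the 'sweeps' records of event 123 restricted to block 42; with
# block_id=None the full sweep record array for the event is returned.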
def get_h5_col(col_name, vc_data_file=default_events):
#
if isinstance(col_name, str): col_name=[col_name]
if col_name[0] not in ('events', 'sweeps'): col_name.insert(0,'events')
#
with h5py.File(vc_data_file) as vc_data:
vc1 = vc_data[col_name[0]]
#
col = vc_data
for cl in col_name:
#
col=col[cl]
#
#
#
return col
| mit |
mapazarr/astropy_scripts | astropy_scripts/test_multiplots/use_plot_something.py | 1 | 2830 | import numpy as np
import matplotlib.pyplot as plt
from plot_something import plot_something
GRAPH_DEBUG = 0
# example 1: 2 plots in the same axis
# x coord
x = np.arange(0, 10, 0.1)
# 1st plot
y = np.sin(x)
ax = plot_something(x, y, style_kwargs=dict(color='blue', label='sin'))
# 2nd plot on top (like Draw("same") in ROOT)
y2 = np.cos(x)
ax = plot_something(x, y2, ax=ax, style_kwargs=dict(color='red', label='cos'))
# legend
ax.legend()
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# example 2: 2 plots in 2 pads of the same canvas
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(16., 8., forward=True)
axes[0] = plot_something(x, y, ax=axes[0], style_kwargs=dict(color='blue', label='sin'))
axes[0].legend()
axes[1] = plot_something(x, y2, ax=axes[1], style_kwargs=dict(color='red', label='cos'))
axes[1].legend()
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# examples using Cube class plots
from astropy.units import Quantity
from astropy.coordinates import Angle
from gammapy.background import Cube
from gammapy import datasets
filename = '../test_datasets/background/bg_cube_model_test.fits'
filename = datasets.get_path(filename, location='remote')
bg_cube_model = Cube.read(filename, format='table', scheme='bg_cube')
# example 3: same as example 2, but using plot_image function
# from Cube class
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(16., 8., forward=True)
axes[0] = bg_cube_model.plot_image(energy=Quantity(2., 'TeV'), ax=axes[0])
axes[1] = bg_cube_model.plot_image(energy=Quantity(20., 'TeV'), ax=axes[1])
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# example 4: same as example 1, but using plot_spectrum function
# from Cube class
fig = plt.figure()
ax = fig.add_subplot(111)
fig.set_size_inches(8., 8., forward=True)
ax = bg_cube_model.plot_spectrum(coord=Angle([0., 0.], 'degree'),
ax=ax,
style_kwargs=dict(color='blue',
label='(0, 0) deg'))
ax = bg_cube_model.plot_spectrum(coord=Angle([2., 2.], 'degree'),
ax=ax,
style_kwargs=dict(color='red',
label='(2, 2) deg'))
# update plot title
#ax.set_title('Multiple Spectra')
ax.set_title('')
# legend
ax.legend()
plt.draw()
if GRAPH_DEBUG:
plt.show() # wait until image is closed
# example 5: plot all images (i.e. one image per energy slice) in bg cube
# see ../gammapy_bg_cube_models_utilities.py plot_examples
# example 6: plot all spectra (i.e. one spectrum per det (X, Y) bin) in bg cube
# see ../gammapy_bg_cube_models_utilities.py plot_examples
plt.show() #don't quit at the end
| gpl-2.0 |
larsjbro/FYS4150 | project_5/source/tridiagonal_solvers.py | 1 | 7739 | '''
Created on 8. des. 2017
@author: ljb
'''
from __future__ import division, absolute_import
from numba import jit, float64, void
import numpy as np
import matplotlib.pyplot as plt
import timeit
import scipy.linalg as linalg
from fys4150.project5.figure_saver import my_savefig
def fun(x):
return 100 * np.exp(-10 * x)
def u(x):
return 1 - (1 - np.exp(-10)) * x - np.exp(-10 * x)
@jit(void(float64[:], float64[:], float64[:], float64[:], float64[:], float64[:]))
def conda_tridiagonal_solve(x, temp, a, b, c, f):
'''
Solving a tridiagonal matrix equation [a, b, c]*x = f
'''
n = len(f)
# decomposition and forward substitution
btemp = b[0]
x[0] = f[0] / btemp
for i in xrange(1, n):
temp[i] = c[i - 1] / btemp
btemp = b[i] - a[i - 1] * temp[i]
x[i] = (f[i] - a[i - 1] * x[i - 1]) / btemp
# backwards substitution
for i in xrange(n - 2, -1, -1):
x[i] -= temp[i + 1] * x[i + 1]
def tridiagonal_solve(a, b, c, f):
'''
Solving a tridiagonal matrix equation [a, b, c]*x = f
'''
a, b, c, f = np.atleast_1d(a, b, c, f)
n = len(f)
x = np.zeros(n)
temp = np.zeros(n)
conda_tridiagonal_solve(x, temp, a, b, c, f)
return x
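# Hedged usage sketch for tridiagonal_solve (illustrative numbers, not from the project):
#   >>> a = np.array([1.0, 1.0])        # sub-diagonal
#   >>> b = np.array([4.0, 4.0, 4.0])   # main diagonal
#   >>> c = np.array([1.0, 1.0])        # super-diagonal
#   >>> f = np.array([6.0, 6.0, 6.0])   # right-hand side
#   >>> x = tridiagonal_solve(a, b, c, f)
#   >>> np.allclose((np.diag(b) + np.diag(a, -1) + np.diag(c, 1)).dot(x), f)
#   True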
def tridiagonal_solve_periodic(a, b, c, f):
'''
Solving a periodic tridiagonal matrix equation
A * x = f
where
diag(A) = [b0, b1, b2, ...., bn-1]
diag(A, -1) = [a0, a1, a2, ...., an-2]
diag(A, 1) = [c0, c1, c2, ...., cn-2]
A[0, n-1] = an-1 (periodic boundary conditions)
A[n-1, 0] = cn-1 (periodic boundary conditions)
Reference
---------
https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
'''
a, b, c, f = np.atleast_1d(a, b, c, f)
n = len(f)
y = np.zeros(n)
q = np.zeros(n)
temp = np.zeros(n)
b1 = np.copy(b)
b0 = b1[0]
cn1 = c[n - 1]
an1 = a[n - 1]
b1[0] = b0 * 2
b1[n - 1] += cn1 * an1 / b0
u = np.zeros(n)
u[0] = -b0
u[-1] = cn1
v = np.zeros(n)
v[0] = 1
v[-1] = - an1 / b0
conda_tridiagonal_solve(y, temp, a, b1, c, f)
conda_tridiagonal_solve(q, temp, a, b1, c, u)
numerator = np.dot(v, y)
denominator = (1 + np.dot(v, q))
if denominator==0:
scale=1.49
else:
scale = numerator / denominator
return y - q * scale
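# Note on the construction above: the periodic matrix A is split as A = A' + u*v^T,
# where A' is the purely tridiagonal matrix held in (a, b1, c) and the outer product
# u*v^T restores the two corner entries.  The Sherman-Morrison identity then gives
# x = y - q * (v.y) / (1 + v.q), with A'y = f and A'q = u computed by the two
# conda_tridiagonal_solve calls.
# Hedged verification sketch (illustrative numbers only):
#   >>> n = 5
#   >>> a = -np.ones(n); b = 4.0 * np.ones(n); c = -np.ones(n)
#   >>> f = np.arange(1.0, n + 1)
#   >>> x = tridiagonal_solve_periodic(a, b, c, f)
#   >>> A = np.diag(b) + np.diag(a[:-1], -1) + np.diag(c[:-1], 1)
#   >>> A[0, -1] = a[-1]; A[-1, 0] = c[-1]
#   >>> np.allclose(A.dot(x), f)
#   True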
def tridiagonal_solve_specific_periodic(f):
'''
Solving a periodic tridiagonal matrix equation
A * x = f
where
diag(A) = [2,2,2, ...., 2]
diag(A, -1) = diag(A, 1) =[-1, -1, -1, ...., -1]
A[0, n-1] = A[n-1, 0] = -1 (periodic boundary conditions)
Reference
---------
https://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
'''
f = np.atleast_1d(f)
n = len(f)
a = -np.ones(n)
b = 2 * np.ones(n)
c = -np.ones(n)
return tridiagonal_solve_periodic(a, b, c, f)
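# Caveat (mathematical, not specific to this implementation): the cyclic [-1, 2, -1]
# matrix has zero row sums, so it is singular and solutions are only determined up to
# an additive constant; in exact arithmetic 1 + v.q vanishes for such a system, which
# appears to be what the denominator check in tridiagonal_solve_periodic guards against.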
def tridiagonal_solve_specific(b):
'''
Solving a tridiagonal matrix equation [-1, 2, -1]*x = b
'''
b = np.atleast_1d(b)
n = len(b)
x = np.zeros(n)
temp = np.zeros(n)
conda_tridiagonal_solve_spesific(x, temp, b)
return x
@jit(void(float64[:], float64[:], float64[:]))
def conda_tridiagonal_solve_spesific(x, temp, b):
'''
Solving a tridiagonal matrix equation [-1, 2, -1]*x = b
'''
n = len(b)
# decomposition and forward substitution
btemp = 2.0
x[0] = b[0] / btemp
for i in xrange(1, n):
temp[i] = -1.0 / btemp
btemp = 2.0 + temp[i]
x[i] = (b[i] + x[i - 1]) / btemp
# backwards substitution
for i in xrange(n - 2, -1, -1):
x[i] -= temp[i + 1] * x[i + 1]
def cpu_time(repetition=10, n=10**6):
'''
Grid n =10^6 and two repetitions gave an average of 7.62825565887 seconds.
'''
t = timeit.timeit('solve_poisson_general({})'.format(n),
setup='from __main__ import solve_poisson_general',
number=repetition) / repetition
print(t)
return t
def cpu_time_specific(repetition=10, n=10**6):
'''
Grid n =10^6 and two repetitions gave an average of 7.512299363 seconds.
'''
t = timeit.timeit('solve_poisson_specific({})'.format(n),
setup='from __main__ import solve_poisson_specific',
number=repetition) / repetition
print(t)
return t
def cpu_time_lu_solve(repetition=10, n=10):
'''
Grid n =10^6 and two repetitions gave an average of 7.512299363 seconds.
'''
t = timeit.timeit('solve_poisson_with_lu({})'.format(n),
setup='from __main__ import solve_poisson_with_lu',
number=repetition) / repetition
print(t)
return t
def solve_poisson_general(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
a = -np.ones(n)
b = 2 * np.ones(n)
c = -np.ones(n)
v = tridiagonal_solve(a, b, c, fx * h ** 2)
return x, v
def solve_poisson_specific(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
v = tridiagonal_solve_specific(fx * h ** 2)
return x, v
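# The specific solver above discretizes -u''(x) = f(x), u(0) = u(1) = 0, as
# -v_{i-1} + 2*v_i - v_{i+1} = h**2 * f(x_i), i.e. exactly the [-1, 2, -1] system;
# u(x) defined at the top of the file is the closed-form solution used in error_test.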
def solve_specific_periodic_with_lu(f):
f = np.atleast_1d(f)
n = len(f)
a = -np.ones(n - 1)
b = 2 * np.ones(n)
c = -np.ones(n - 1)
A = np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1)
A[0, -1] -= 1
A[-1, 0] -= 1
v = lu_solve(A, f)
return v
def solve_poisson_with_lu(n):
h = 1.0 / (n + 1) # step_length
x = np.arange(1, n + 1) * h
fx = fun(x)
a = -np.ones(n - 1)
b = 2 * np.ones(n)
c = -np.ones(n - 1)
A = np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1)
v = lu_solve(A, fx * h ** 2)
return x, v
def tri_solve_test_compare():
n = 10
for i, n in enumerate([10, 100, 1000]):
x, v = solve_poisson_specific(n)
plt.figure(i)
plt.plot(x, v, '.', label='numerical')
plt.plot(x, u(x), label='exact')
plt.title('n={}'.format(n))
plt.legend()
filename = "task1b_n{}.png".format(n)
my_savefig(filename)
def error_test():
max_error = []
problem_sizes = np.array([10, 100, 1000, 10000, 100000, 1000000, 10000000])
for n in problem_sizes:
x, v = solve_poisson_specific(n)
ui = u(x)
max_error.append(np.log10(max(np.abs((v - ui) / ui))))
h = 1 / (problem_sizes + 1)
plt.plot(np.log10(h), max_error)
plt.title('Relative error')
plt.xlabel('log10(h)')
plt.ylabel('log10(relative error)')
filename = 'error.png'
my_savefig(filename)
def lu_solve(A, b):
'''
Solves A*x= b
'''
A, b = np.atleast_1d(A, b)
lu_and_pivot = linalg.lu_factor(A)
x = linalg.lu_solve(lu_and_pivot, b)
return x
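# Hedged usage sketch (illustrative numbers only):
#   >>> A = np.array([[4.0, 1.0], [1.0, 3.0]])
#   >>> b = np.array([1.0, 2.0])
#   >>> x = lu_solve(A, b)
#   >>> np.allclose(A.dot(x), b)
#   True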
def lu_test_compare():
n = 10
for i, n in enumerate([10, 100, 1000]):
x, v = solve_poisson_with_lu(n)
plt.figure(i)
plt.plot(x, v, '.', label='numerical')
plt.plot(x, u(x), label='exact')
plt.title('n={}'.format(n))
plt.legend()
def plot_run_times(sizes=(10, 100, 1000, 2000)):
times = np.zeros((len(sizes), 3))
for i, n in enumerate(sizes):
times[i, 0] = cpu_time_specific(10, n)
times[i, 1] = cpu_time(10, n)
times[i, 2] = cpu_time_lu_solve(10, n)
for i, name in enumerate(['tri_spec', 'tri', 'lu']):
plt.loglog(sizes, times[:, i], label=name)
plt.legend()
if __name__ == '__main__':
# cpu_time(2)
# cpu_time_specific(2)
# tri_solve_test_compare()
# solve_poisson_with_lu(10)
# error_test()
# lu_test_compare()
# plot_run_times(sizes)
plt.show()
| bsd-2-clause |
polde-live/pyml | pyml/assignment1.py | 1 | 2486 | import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# Question 0
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
# This function returns the number of features of the breast cancer dataset, which is an integer.
# The assignment question description will tell you the general format the autograder is expecting
cancer = load_breast_cancer()
# print(cancer.DESCR)
# cancer.keys()
return len(cancer['feature_names'])
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
assert answer_zero() == 30
# Question 1
def answer_one():
cancer = load_breast_cancer()
colnames = np.append(cancer['feature_names'], 'target')
cancer_df = pd.DataFrame(data=np.c_[cancer['data'], cancer['target']],
columns=colnames)
return cancer_df
# Question 2
def answer_two():
cancerdf = answer_one()
targ = cancerdf['target']
bening_count = int(sum(targ))
malignant_count = int(targ.size - bening_count)
target = pd.Series([malignant_count, bening_count], index = ['malignant', 'benign'])
return target
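# Note: this relies on scikit-learn's encoding of the dataset (0 = malignant, 1 = benign),
# so summing the target column counts the benign cases.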
# Question 3
def answer_three():
cancerdf = answer_one()
X = cancerdf.drop('target', axis=1)
y = cancerdf['target']
return X, y
# Question 4
def answer_four():
X, y = answer_three()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)
return X_train, X_test, y_train, y_test
# Question 5
def answer_five():
X_train, X_test, y_train, y_test = answer_four()
knn = KNeighborsClassifier(n_neighbors = 1)
return knn.fit(X_train, y_train)
# Question 6
def answer_six():
cancerdf = answer_one()
means = cancerdf.mean()[:-1].values.reshape(1, -1)
knn = answer_five()
return knn.predict(means)
# Question 7
def answer_seven():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
return knn.predict(X_test)
# Question 8
def answer_eight():
X_train, X_test, y_train, y_test = answer_four()
knn = answer_five()
return knn.score(X_test, y_test)
| unlicense |
m00nlight/hackerrank | ai/Machine-Learning/Stack-Exchange-Question-Classifier/main.py | 1 | 1631 | from __future__ import division
from sklearn import svm, preprocessing
import numpy as np
from sklearn.feature_selection import VarianceThreshold
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import json
def solve():
training = []
annotation = []
# download training file and sample test file from the problem description
# url: https://www.hackerrank.com/challenges/stack-exchange-question-classifier
with open("training.json") as f:
f.readline()
for line in f:
data = json.loads(line)
annotation.append(data['topic'])
training.append(data['question'])
count_vect = CountVectorizer(ngram_range = (1, 2), \
token_pattern = r'\b\w+\b',\
min_df = 1)
training_counts = count_vect.fit_transform(training)
tfidf_transformer = TfidfTransformer()
training_tfidf = tfidf_transformer.fit_transform(training_counts)
classifier = svm.LinearSVC().fit(training_tfidf, annotation)
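    # The three steps above form a CountVectorizer -> TfidfTransformer -> LinearSVC
    # pipeline: unigram and bigram counts (ngram_range=(1, 2)) are re-weighted by
    # TF-IDF and fed to a linear SVM.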
q = int(raw_input())
qs = []
for _ in range(q):
data = json.loads(raw_input().strip())
qs.append(data['question'])
qs_counts = count_vect.transform(qs)
qs_tfidf = tfidf_transformer.transform(qs_counts)
ans = classifier.predict(qs_tfidf)
for a in ans:
print a
if __name__ == '__main__':
solve()
| gpl-2.0 |
demisto/content | Packs/Base/Scripts/GetMLModelEvaluation/GetMLModelEvaluation.py | 1 | 18748 | import numpy as np
import pandas as pd
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
from tabulate import tabulate
from typing import Dict
from CommonServerPython import *
# pylint: disable=no-member
METRICS = {}
METRICS['Precision'] = 'The precision of the class in the evaluation set that were classified as this class by the ' \
'model. Precision is calculated by dividing the TPs of the class by the number of incidents that ' \
'the model predicted as this class.'
METRICS['TP (true positive)'] = 'The number of incidents from the class in the evaluation set that were predicted ' \
'correctly. '
METRICS['FP (false positive)'] = 'The number of incidents from other classes that were predicted incorrectly as this class.'
METRICS['Coverage'] = 'The number of incidents from the class in the evaluation set for which the confidence level of ' \
'the model exceeded the threshold in the prediction.'
METRICS['Total'] = 'The total number of incidents from the class in the evaluation set.'
def bold_hr(s):
return '**{}:**'.format(s)
def binarize(arr, threshold):
return np.where(arr >= threshold, 1.0, 0)
def calculate_confusion_matrix(y_true, y_pred, y_pred_per_class, threshold):
indices_higher_than_threshold = set()
for i, y in enumerate(y_pred):
if y_pred_per_class[y][i] >= threshold:
indices_higher_than_threshold.add(i)
y_true_at_threshold = [y for i, y in enumerate(y_true) if i in indices_higher_than_threshold]
y_pred_at_threshold = [y for i, y in enumerate(y_pred) if i in indices_higher_than_threshold]
test_tag = pd.Series(y_true_at_threshold)
ft_test_predictions_labels = pd.Series(y_pred_at_threshold)
csr_matrix = pd.crosstab(test_tag, ft_test_predictions_labels, rownames=['True'], colnames=['Predicted'],
margins=True)
return csr_matrix
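# Minimal illustration of the thresholding above (class name and scores are hypothetical):
# with threshold=0.8 a prediction {'Phishing': 0.9} is kept while {'Phishing': 0.6} is
# dropped, and pd.crosstab then tabulates the remaining (true, predicted) label pairs,
# with margins=True adding the 'All' row and column.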
def generate_metrics_df(y_true, y_true_per_class, y_pred, y_pred_per_class, threshold):
df = pd.DataFrame(columns=['Class', 'Precision', 'Recall', 'TP', 'FP', 'Coverage', 'Total'])
for class_ in sorted(y_pred_per_class):
row = calculate_df_row(class_, threshold, y_true_per_class, y_pred_per_class)
df = df.append(row, ignore_index=True)
df = df.append({'Class': 'All',
'Precision': df["Precision"].mean(),
'Recall': df["Recall"].mean(),
'TP': df["TP"].sum(),
'FP': df["FP"].sum(),
'Coverage': df["Coverage"].sum(),
'Total': df["Total"].sum()}, ignore_index=True)
df = df[['Class', 'Precision', 'TP', 'FP', 'Coverage', 'Total']]
explained_metrics = ['Precision', 'TP (true positive)', 'FP (false positive)', 'Coverage', 'Total']
explanation = ['{} {}'.format(bold_hr(metric), METRICS[metric]) for metric in explained_metrics]
df.set_index('Class', inplace=True)
return df, explanation
def calculate_df_row(class_, threshold, y_true_per_class, y_pred_per_class):
y_pred_class = y_pred_per_class[class_]
y_true_class = y_true_per_class[class_]
y_pred_class_binary = binarize(y_pred_class, threshold)
precision = precision_score(y_true=y_true_class, y_pred=y_pred_class_binary)
recall = recall_score(y_true=y_true_class, y_pred=y_pred_class_binary)
classified_correctly = sum(1 for y_true_i, y_pred_i in zip(y_true_class, y_pred_class_binary) if y_true_i == 1
and y_pred_i == 1)
above_thresh = sum(1 for i, y_true_i in enumerate(y_true_class) if y_true_i == 1
and any(y_pred_per_class[c][i] >= threshold for c in y_pred_per_class))
fp = sum(1 for i, y_true_i in enumerate(y_true_class) if y_true_i == 0 and y_pred_class_binary[i] == 1.0)
total = int(sum(y_true_class))
row = {'Class': class_,
'Precision': precision,
'Recall': recall,
'TP': classified_correctly,
'FP': fp,
'Coverage': int(above_thresh),
'Total': total}
return row
def reformat_df_fractions_to_percentage(metrics_df):
hr_df = metrics_df.copy()
hr_df['Precision'] = hr_df['Precision'].apply(lambda p: '{:.1f}%'.format(p * 100))
hr_df['TP'] = hr_df.apply(lambda row: '{}/{} ({:.1f}%)'.format(int(row['TP']),
int(row['Coverage']),
float(row['TP']) * 100 / row['Coverage']),
axis=1)
hr_df['Coverage'] = hr_df.apply(lambda row: '{}/{} ({:.1f}%)'.format(int(row['Coverage']), row['Total'],
float(row['Coverage']) * 100 / row['Total']),
axis=1)
return hr_df
def output_report(y_true, y_true_per_class, y_pred, y_pred_per_class, found_threshold, target_precision,
actual_threshold_precision, detailed_output=True):
csr_matrix_at_threshold = calculate_confusion_matrix(y_true, y_pred, y_pred_per_class, found_threshold)
csr_matrix_no_threshold = calculate_confusion_matrix(y_true, y_pred, y_pred_per_class, 0)
metrics_df, metrics_explanation = generate_metrics_df(y_true, y_true_per_class, y_pred, y_pred_per_class,
found_threshold)
coverage = metrics_df.loc[['All']]['Coverage'][0]
test_set_size = metrics_df.loc[['All']]['Total'][0]
human_readable_threshold = ['## Summary']
# in case the found threshold meets the target accuracy
if actual_threshold_precision >= target_precision or abs(found_threshold - target_precision) < 10 ** -2:
human_readable_threshold += ['- A confidence threshold of {:.2f} meets the conditions of required precision.'
.format(found_threshold)]
else:
human_readable_threshold += ['- Could not find a threshold which meets the conditions of required precision. '
'The confidence threshold of {:.2f} achieved highest '
'possible precision'.format(found_threshold)]
human_readable_threshold += [
'- {}/{} incidents of the evaluation set were predicted with higher confidence than this threshold.'.format(
int(coverage), int(test_set_size)),
'- The remainder, {}/{} incidents of the evaluation set, were predicted with lower confidence than this threshold '
'(these predictions were ignored).'.format(
int(test_set_size - coverage), int(test_set_size)),
'- Expected coverage ratio: The model will attempt to provide a prediction for {:.2f}% of incidents. '
'({}/{})'.format(
float(coverage) / test_set_size * 100, int(coverage), int(test_set_size)),
'- Evaluation of the model performance using this probability threshold can be found below:']
pd.set_option('display.max_columns', None)
tablualted_csr = tabulate(reformat_df_fractions_to_percentage(metrics_df), tablefmt="pipe", headers="keys")
class_metrics_human_readable = ['## Metrics per Class', tablualted_csr]
class_metrics_explanation_human_readable = ['### Metrics Explanation'] + ['- ' + row for row in metrics_explanation]
csr_matrix_readable = ['## Confusion Matrix',
                           'This table displays the predictions of the model on the evaluation set for each '
+ 'class:',
tabulate(csr_matrix_at_threshold,
tablefmt="pipe",
headers="keys").replace("True", "True \\ Predicted"),
'\n']
csr_matrix_no_thresh_readable = ['## Confusion Matrix - No Threshold',
                                     'This table displays the predictions of the model on the evaluation set for each '
+ 'class when no threshold is used:',
tabulate(csr_matrix_no_threshold,
tablefmt="pipe",
headers="keys").replace("True", "True \\ Predicted"),
'\n']
human_readable = [] # type: ignore
if detailed_output:
human_readable += human_readable_threshold + ['\n']
else:
human_readable += ['## Results for confidence threshold = {:.2f}'.format(found_threshold)] + ['\n']
human_readable += class_metrics_human_readable + ['\n']
human_readable += class_metrics_explanation_human_readable
human_readable += csr_matrix_readable
human_readable += csr_matrix_no_thresh_readable
human_readable = '\n'.join(human_readable)
contents = {'threshold': found_threshold,
'csr_matrix_at_threshold': csr_matrix_at_threshold.to_json(orient='index'),
'csr_matrix_no_threshold': csr_matrix_no_threshold.to_json(orient='index'),
'metrics_df': metrics_df.to_json()}
entry = {
'Type': entryTypes['note'],
'Contents': contents,
'ContentsFormat': formats['json'],
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': {
'GetMLModelEvaluation': {
'Threshold': found_threshold,
'ConfusionMatrixAtThreshold': csr_matrix_at_threshold.to_json(orient='index'),
'ConfusionMatrixNoThreshold': csr_matrix_no_threshold.to_json(orient='index'),
'Metrics': metrics_df.to_json()
}
}
}
return entry
def merge_entries(entry, per_class_entry):
entry = {
'Type': entryTypes['note'],
'Contents': entry['Contents'],
'ContentsFormat': formats['json'],
'HumanReadable': entry['HumanReadable'] + '\n' + per_class_entry['HumanReadable'],
'HumanReadableFormat': formats['markdown'],
'EntryContext': {**entry['EntryContext'], **per_class_entry['EntryContext']}
}
return entry
def find_threshold(y_true_str, y_pred_str, customer_target_precision, target_recall, detailed_output=True):
y_true = convert_str_to_json(y_true_str, 'yTrue')
y_pred_all_classes = convert_str_to_json(y_pred_str, 'yPred')
labels = sorted(set(y_true + list(y_pred_all_classes[0].keys())))
n_instances = len(y_true)
y_true_per_class = {class_: np.zeros(n_instances) for class_ in labels}
for i, y in enumerate(y_true):
y_true_per_class[y][i] = 1.0
y_pred_per_class = {class_: np.zeros(n_instances) for class_ in labels}
y_pred = []
for i, y in enumerate(y_pred_all_classes):
predicted_class = sorted(y.items(), key=lambda x: x[1], reverse=True)[0][0]
y_pred_per_class[predicted_class][i] = y[predicted_class]
y_pred.append(predicted_class)
class_to_arrs = {class_: {} for class_ in labels} # type: Dict[str, Dict[str, Any]]
for class_ in labels:
precision_arr, recall_arr, thresholds_arr = precision_recall_curve(y_true_per_class[class_],
y_pred_per_class[class_])
class_to_arrs[class_]['precisions'] = precision_arr
class_to_arrs[class_]['recalls'] = recall_arr
class_to_arrs[class_]['thresholds'] = thresholds_arr
# find threshold for all classes such as precision of all classes are higher than target precision:
unified_threshold, unified_threshold_precision, target_unified_precision = find_best_threshold_for_target_precision(
class_to_arrs, customer_target_precision, labels)
if unified_threshold is None or unified_threshold_precision is None:
error_message = 'Could not find any threshold at ranges {} - {:.2f}.'.format(target_unified_precision,
customer_target_precision)
return_error(error_message)
entry = output_report(np.array(y_true), y_true_per_class, np.array(y_pred), y_pred_per_class, unified_threshold,
customer_target_precision, unified_threshold_precision, detailed_output)
per_class_entry = calculate_per_class_report_entry(class_to_arrs, labels, y_pred_per_class, y_true_per_class)
res = merge_entries(entry, per_class_entry)
return res
def find_best_threshold_for_target_precision(class_to_arrs, customer_target_precision, labels):
target_unified_precision = round(customer_target_precision, 2)
unified_threshold_found = False
threshold = None
threshold_precision = None
while not unified_threshold_found:
threshold_per_class = {}
precision_per_class = {}
for class_ in labels:
            # indexing is done on purpose - the ith precision corresponds to threshold i-1; the last precision is 1
for i, precision in enumerate(class_to_arrs[class_]['precisions'][:-1]):
if class_to_arrs[class_]['thresholds'][i] == 0:
continue
if precision > target_unified_precision:
threshold_per_class[class_] = class_to_arrs[class_]['thresholds'][i]
precision_per_class[class_] = precision
break
if len(threshold_per_class) == len(labels):
threshold_candidates = sorted(list(threshold_per_class.values()))
for threshold in threshold_candidates:
legal_threshold_for_all_classes = True
threshold_precision = sys.maxsize
for class_ in labels:
i = np.argmax(class_to_arrs[class_]['thresholds'] >= threshold) # type: ignore
threshold_precision_for_class = class_to_arrs[class_]['precisions'][i]
threshold_precision = min(threshold_precision, threshold_precision_for_class)
if threshold_precision_for_class >= target_unified_precision:
legal_threshold_for_all_classes = True
else:
legal_threshold_for_all_classes = False
break
if legal_threshold_for_all_classes:
unified_threshold_found = True
break
elif target_unified_precision < 0:
break
target_unified_precision -= 0.01
return threshold, threshold_precision, target_unified_precision
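# The loop above relaxes target_unified_precision in steps of 0.01 until a single
# threshold exists at which every class reaches the (possibly relaxed) precision;
# the threshold_precision returned here is the worst per-class precision at that threshold.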
def calculate_per_class_report_entry(class_to_arrs, labels, y_pred_per_class, y_true_per_class):
per_class_hr = ['## Per-Class Report']
per_class_hr += [
        'The following tables present evaluation of the model per class at different confidence thresholds:']
class_to_thresholds = {}
for class_ in labels:
class_to_thresholds[class_] = set([0.001]) # using no threshold
for target_precision in np.arange(0.95, 0.5, -0.05):
            # indexing is done on purpose - the ith precision corresponds to threshold i-1; the last precision is 1
for i, precision in enumerate(class_to_arrs[class_]['precisions'][:-1]):
if class_to_arrs[class_]['thresholds'][i] == 0:
continue
if precision > target_precision and class_to_arrs[class_]['recalls'][i] > 0:
threshold = class_to_arrs[class_]['thresholds'][i]
class_to_thresholds[class_].add(threshold)
break
if len(class_to_thresholds[class_]) >= 4:
break
per_class_context = {}
for class_ in labels:
class_threshold_df = pd.DataFrame(columns=['Threshold', 'Precision', 'Recall', 'TP', 'FP', 'Coverage', 'Total'])
for threshold in sorted(class_to_thresholds[class_]):
row = calculate_df_row(class_, threshold, y_true_per_class, y_pred_per_class)
row['Threshold'] = threshold
class_threshold_df = class_threshold_df.append(row, ignore_index=True)
class_threshold_df = reformat_df_fractions_to_percentage(class_threshold_df)
class_threshold_df['Threshold'] = class_threshold_df['Threshold'].apply(lambda p: '{:.2f}'.format(p))
class_threshold_df = class_threshold_df[['Threshold', 'Precision', 'TP', 'FP', 'Coverage', 'Total']]
class_threshold_df.sort_values(by='Coverage', ascending=False, inplace=True)
class_threshold_df.drop_duplicates(subset='Threshold', inplace=True, keep='first')
class_threshold_df.drop_duplicates(subset='Precision', inplace=True, keep='first')
class_threshold_df.set_index('Threshold', inplace=True)
per_class_context[class_] = class_threshold_df.to_json()
tabulated_class_df = tabulate(class_threshold_df, tablefmt="pipe", headers="keys")
per_class_hr += ['### {}'.format(class_), tabulated_class_df]
per_class_entry = {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': [],
'HumanReadable': '\n'.join(per_class_hr),
'HumanReadableFormat': formats['markdown'],
'EntryContext': {'GetMLModelEvaluation': {'PerClassReport': per_class_context}}
}
return per_class_entry
def convert_str_to_json(str_json, var_name):
try:
y_true = json.loads(str_json)
return y_true
except Exception as e:
return_error('Exception while reading {} :{}'.format(var_name, e))
def main():
y_pred_all_classes = demisto.args()["yPred"]
y_true = demisto.args()["yTrue"]
target_precision = calculate_and_validate_float_parameter("targetPrecision")
target_recall = calculate_and_validate_float_parameter("targetRecall")
detailed_output = 'detailedOutput' in demisto.args() and demisto.args()['detailedOutput'] == 'true'
entries = find_threshold(y_true_str=y_true,
y_pred_str=y_pred_all_classes,
customer_target_precision=target_precision,
target_recall=target_recall,
detailed_output=detailed_output)
demisto.results(entries)
def calculate_and_validate_float_parameter(var_name):
try:
res = float(demisto.args()[var_name]) if var_name in demisto.args() else 0
except Exception:
return_error('{} must be a float between 0-1 or left empty'.format(var_name))
if res < 0 or res > 1:
return_error('{} must be a float between 0-1 or left empty'.format(var_name))
return res
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| mit |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/blocking_input.py | 69 | 12119 | """
This provides several classes used for blocking interaction with figure windows:
:class:`BlockingInput`
creates a callable object to retrieve events in a blocking way for interactive sessions
:class:`BlockingKeyMouseInput`
creates a callable object to retrieve key or mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by waitforbuttonpress
:class:`BlockingMouseInput`
creates a callable object to retrieve mouse clicks in a blocking way for interactive sessions.
Note: Subclass of BlockingInput. Used by ginput
:class:`BlockingContourLabeler`
creates a callable object to retrieve mouse clicks in a blocking way that will then be used to place labels on a ContourSet
Note: Subclass of BlockingMouseInput. Used by clabel
"""
import time
import numpy as np
from matplotlib import path, verbose
from matplotlib.cbook import is_sequence_of_strings
class BlockingInput(object):
"""
Class that creates a callable object to retrieve events in a
blocking way.
"""
def __init__(self, fig, eventslist=()):
self.fig = fig
assert is_sequence_of_strings(eventslist), "Requires a sequence of event name strings"
self.eventslist = eventslist
def on_event(self, event):
"""
Event handler that will be passed to the current figure to
retrieve events.
"""
# Add a new event to list - using a separate function is
# overkill for the base class, but this is consistent with
# subclasses
self.add_event(event)
verbose.report("Event %i" % len(self.events))
# This will extract info from events
self.post_event()
# Check if we have enough events already
if len(self.events) >= self.n and self.n > 0:
self.fig.canvas.stop_event_loop()
def post_event(self):
"""For baseclass, do nothing but collect events"""
pass
def cleanup(self):
"""Disconnect all callbacks"""
for cb in self.callbacks:
self.fig.canvas.mpl_disconnect(cb)
self.callbacks=[]
def add_event(self,event):
"""For base class, this just appends an event to events."""
self.events.append(event)
def pop_event(self,index=-1):
"""
This removes an event from the event list. Defaults to
        removing the last event, but an index can be supplied. Note that
        this does not check that there are events, much like the
        normal pop method. If no events exist, this will raise an
exception.
"""
self.events.pop(index)
def pop(self,index=-1):
self.pop_event(index)
pop.__doc__=pop_event.__doc__
def __call__(self, n=1, timeout=30 ):
"""
Blocking call to retrieve n events
"""
assert isinstance(n, int), "Requires an integer argument"
self.n = n
self.events = []
self.callbacks = []
# Ensure that the figure is shown
self.fig.show()
# connect the events to the on_event function call
for n in self.eventslist:
self.callbacks.append( self.fig.canvas.mpl_connect(n, self.on_event) )
try:
# Start event loop
self.fig.canvas.start_event_loop(timeout=timeout)
finally: # Run even on exception like ctrl-c
# Disconnect the callbacks
self.cleanup()
# Return the events in this case
return self.events
class BlockingMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve mouse clicks in a
blocking way.
This class will also retrieve keyboard clicks and treat them like
appropriate mouse clicks (delete and backspace are like mouse button 3,
enter is like mouse button 2 and all others are like mouse button 1).
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig,
eventslist=('button_press_event',
'key_press_event') )
def post_event(self):
"""
This will be called to process events
"""
assert len(self.events)>0, "No events yet"
if self.events[-1].name == 'key_press_event':
self.key_event()
else:
self.mouse_event()
def mouse_event(self):
'''Process a mouse click event'''
event = self.events[-1]
button = event.button
if button == 3:
self.button3(event)
elif button == 2:
self.button2(event)
else:
self.button1(event)
def key_event(self):
'''
Process a key click event. This maps certain keys to appropriate
mouse click events.
'''
event = self.events[-1]
key = event.key
if key == 'backspace' or key == 'delete':
self.button3(event)
elif key == 'enter':
self.button2(event)
else:
self.button1(event)
def button1( self, event ):
"""
Will be called for any event involving a button other than
button 2 or 3. This will add a click if it is inside axes.
"""
if event.inaxes:
self.add_click(event)
else: # If not a valid click, remove from event list
BlockingInput.pop(self)
def button2( self, event ):
"""
Will be called for any event involving button 2.
Button 2 ends blocking input.
"""
# Remove last event just for cleanliness
BlockingInput.pop(self)
# This will exit even if not in infinite mode. This is
# consistent with matlab and sometimes quite useful, but will
# require the user to test how many points were actually
# returned before using data.
self.fig.canvas.stop_event_loop()
def button3( self, event ):
"""
Will be called for any event involving button 3.
Button 3 removes the last click.
"""
# Remove this last event
BlockingInput.pop(self)
# Now remove any existing clicks if possible
if len(self.events)>0:
self.pop()
def add_click(self,event):
"""
This add the coordinates of an event to the list of clicks
"""
self.clicks.append((event.xdata,event.ydata))
verbose.report("input %i: %f,%f" %
(len(self.clicks),event.xdata, event.ydata))
# If desired plot up click
if self.show_clicks:
self.marks.extend(
event.inaxes.plot([event.xdata,], [event.ydata,], 'r+') )
self.fig.canvas.draw()
def pop_click(self,index=-1):
"""
This removes a click from the list of clicks. Defaults to
removing the last click.
"""
self.clicks.pop(index)
if self.show_clicks:
mark = self.marks.pop(index)
mark.remove()
self.fig.canvas.draw()
def pop(self,index=-1):
"""
This removes a click and the associated event from the object.
Defaults to removing the last click, but any index can be
supplied.
"""
self.pop_click(index)
BlockingInput.pop(self,index)
def cleanup(self):
# clean the figure
if self.show_clicks:
for mark in self.marks:
mark.remove()
self.marks = []
self.fig.canvas.draw()
# Call base class to remove callbacks
BlockingInput.cleanup(self)
def __call__(self, n=1, timeout=30, show_clicks=True):
"""
Blocking call to retrieve n coordinate pairs through mouse
clicks.
"""
self.show_clicks = show_clicks
self.clicks = []
self.marks = []
BlockingInput.__call__(self,n=n,timeout=timeout)
return self.clicks
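# Hedged usage sketch for BlockingMouseInput (this is the machinery behind ginput,
# per the module docstring; figure handling is illustrative and assumes pyplot as plt):
#   >>> fig = plt.figure()
#   >>> clicks = BlockingMouseInput(fig)(n=3, timeout=30)   # list of up to 3 (x, y) pairs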
class BlockingContourLabeler( BlockingMouseInput ):
"""
Class that creates a callable object that uses mouse clicks or key
clicks on a figure window to place contour labels.
"""
def __init__(self,cs):
self.cs = cs
BlockingMouseInput.__init__(self, fig=cs.ax.figure )
def button1(self,event):
"""
This will be called if an event involving a button other than
        2 or 3 occurs. This will add a label to a contour.
"""
# Shorthand
cs = self.cs
if event.inaxes == cs.ax:
conmin,segmin,imin,xmin,ymin = cs.find_nearest_contour(
event.x, event.y, cs.labelIndiceList)[:5]
# Get index of nearest level in subset of levels used for labeling
lmin = cs.labelIndiceList.index(conmin)
# Coordinates of contour
paths = cs.collections[conmin].get_paths()
lc = paths[segmin].vertices
# In pixel/screen space
slc = cs.ax.transData.transform(lc)
# Get label width for rotating labels and breaking contours
lw = cs.get_label_width(cs.labelLevelList[lmin],
cs.labelFmt, cs.labelFontSizeList[lmin])
"""
# requires python 2.5
# Figure out label rotation.
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lc if self.inline else [],
self.inline_spacing )
"""
# Figure out label rotation.
if self.inline: lcarg = lc
else: lcarg = None
rotation,nlc = cs.calc_label_rot_and_inline(
slc, imin, lw, lcarg,
self.inline_spacing )
cs.add_label(xmin,ymin,rotation,cs.labelLevelList[lmin],
cs.labelCValueList[lmin])
if self.inline:
# Remove old, not looping over paths so we can do this up front
paths.pop(segmin)
# Add paths if not empty or single point
for n in nlc:
if len(n)>1:
paths.append( path.Path(n) )
self.fig.canvas.draw()
else: # Remove event if not valid
BlockingInput.pop(self)
def button3(self,event):
"""
This will be called if button 3 is clicked. This will remove
a label if not in inline mode. Unfortunately, if one is doing
inline labels, then there is currently no way to fix the
broken contour - once humpty-dumpty is broken, he can't be put
back together. In inline mode, this does nothing.
"""
# Remove this last event - not too important for clabel use
# since clabel normally doesn't have a maximum number of
# events, but best for cleanliness sake.
BlockingInput.pop(self)
if self.inline:
pass
else:
self.cs.pop_label()
self.cs.ax.figure.canvas.draw()
def __call__(self,inline,inline_spacing=5,n=-1,timeout=-1):
self.inline=inline
self.inline_spacing=inline_spacing
BlockingMouseInput.__call__(self,n=n,timeout=timeout,
show_clicks=False)
class BlockingKeyMouseInput(BlockingInput):
"""
Class that creates a callable object to retrieve a single mouse or
keyboard click
"""
def __init__(self, fig):
BlockingInput.__init__(self, fig=fig, eventslist=('button_press_event','key_press_event') )
def post_event(self):
"""
Determines if it is a key event
"""
assert len(self.events)>0, "No events yet"
self.keyormouse = self.events[-1].name == 'key_press_event'
def __call__(self, timeout=30):
"""
Blocking call to retrieve a single mouse or key click
Returns True if key click, False if mouse, or None if timeout
"""
self.keyormouse = None
BlockingInput.__call__(self,n=1,timeout=timeout)
return self.keyormouse
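# Hedged usage sketch for BlockingKeyMouseInput (the machinery behind waitforbuttonpress,
# per the module docstring):
#   >>> keyormouse = BlockingKeyMouseInput(fig)(timeout=30)
#   >>> # True -> key press, False -> mouse click, None -> timeout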
| agpl-3.0 |