repo_name stringlengths 6-112 | path stringlengths 4-204 | copies stringlengths 1-3 | size stringlengths 4-6 | content stringlengths 714-810k | license stringclasses 15 values
---|---|---|---|---|---|
Martin09/E-BeamPatterns | 111 Wafers - 1.2cm Triangles/111A Nanowires/v1.3/gdsCAD_v045/templates111.py | 2 | 33363 | # -*- coding: utf-8 -*-
"""
Templates for automating the design of different wafer styles.
.. note::
Copyright 2009-2012 Lucas Heitzmann Gabrielli
Copyright 2013 Andrew G. Mark
gdsCAD (based on gdspy) is released under the terms of the GNU GPL
"""
# TODO: Make it more pythonic, create separate classes for cells, blocks etc. to make it easier to read
import string
from operator import itemgetter
import networkx as nx
import numpy as np
from descartes.patch import PolygonPatch
from .core import Cell, Path, Boundary
from .shapes import Circle, Label, LineLabel
from shapely.affinity import rotate as rotateshape
from shapely.affinity import translate as translateshape
from shapely.geometry import Polygon, Point, LineString, box
RED = '#F0421D'
ORANGE = '#F0AC1D'
GREEN = '#1DF042'
BLUE = '#1DF0AC'
BLACK = '#000000'
# Helper function:
# Given two points from a line, returns a cell containing a dashed line connecting the two points
def dashed_line(pt1, pt2, dashlength, width, layer):
line = LineString((pt1, pt2))
dash_pts = np.arange(0, line.length, dashlength).tolist()
if len(dash_pts) % 2 == 1: # Odd number
dash_pts.append(line.length) # Add last point on line
dash_pts = list(map(line.interpolate, dash_pts)) # Interpolate points along this line to make dashes
dash_pts = [pt.xy for pt in dash_pts]
dash_pts = np.reshape(dash_pts, (-1, 2, 2))
lines = [Path(list(linepts), width=width, layer=layer) for linepts in dash_pts]
dline = Cell('DASHLINE')
dline.add(lines)
return dline
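# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Shows how the helper above could be called to draw a horizontal dashed line
# made of 200-um dashes. The coordinates, dash length, width and layer are
# illustrative values only, not taken from the original design.
def _demo_dashed_line():
    return dashed_line((0., 0.), (5000., 0.), dashlength=200., width=10., layer=1)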
class Wafer_TriangStyle(Cell):
"""
Wafer style for [111] wafers consisting of triangular blocks of patterned features.
:param name: The name of the new wafer cell
:param wafer_r: the radius of the wafer in um
:param cells: a list of cells that will be tiled to fill each block;
the cells will be cycled until all blocks are filled.
:param block_gap: the gap between the triangular blocks
:param cell_gap: the gap between the square cells within each block
:param trisize: in um, length of triangle sides
:param cellsize: size of each cell within a block
:param MCIterations: Number of Monte Carlo iterations used to find the optimal position of cells within the blocks
:param doMCSearch: Whether or not to optimize the placement of the square cells within each triangular block
:param symmetric_chips: makes the up-facing and down-facing chips symmetric by rotating them 180 degrees. However, for direction-sensitive devices (e.g. branched structures) the 180-degree rotation is undesirable.
:returns: A new wafer ``Cell``
Spacing between cells in a block is determined automatically based on the cell
bounding box, or by using the attribute cell.spacing if it is available.
"""
# the placement of the wafer alignment points
align_pts = None
def __init__(self,
name,
cells=None,
wafer_r=25.5e3,
trisize=10e3,
cellsize=2e3,
block_gap=0.,
cell_gap=200.,
doMCSearch=True,
MCIterations=30, # Small square cells
doMCBlockSearch=True,
MCBlockIterations=50, # Large triangular blocks
mkWidth=10,
cellsAtEdges=False,
symmetric_chips=True):
Cell.__init__(self, name)
self.wafer_r = wafer_r
self.trisize = trisize
self.cellsize = cellsize
self.block_gap = block_gap
self.cell_gap = cell_gap
self.doMCSearch = doMCSearch
self.MCIterations = MCIterations
self.doMCBlockSearch = doMCBlockSearch
self.MCBlockIterations = MCBlockIterations
# Create a circle shape with the radius of the wafer
circ = Point(0., 0.)
self.waferShape = circ.buffer(wafer_r)
self.blockOffset = (0, 0)
self.cells = cells
self.cell_layers = self._cell_layers()
self._label = None
self.upCellLattice = []
self.downCellLattice = []
self.upCenters = []
self.downCenters = []
self.upTris = []
self.downTris = []
self.cellsAtEdges = cellsAtEdges
self.symmetric_chips = symmetric_chips
def _cell_layers(self):
"""
A list of all active layers in ``cells``
"""
cell_layers = set()
for c in self.cells:
if isinstance(c, Cell):
cell_layers |= set(c.get_layers())
else:
for s in c:
cell_layers |= set(s.get_layers())
return list(cell_layers)
def add_aligment_marks(self, layers):
"""
Create alignment marks on all active layers
"""
if not (type(layers) == list): layers = [layers]
d_layers = self.cell_layers
# styles=['B' if i%2 else 'B' for i in range(len(d_layers))]
# am = AlignmentMarks(styles, d_layers)
am = Cell('CONT_ALGN')
# Define dimensions of the alignment cross
t = 200. # Thickness
t /= 2.
h = 2000. # Height
w = 2000. # Width
crosspts = [
(-t, t), (-t, h), (t, h), (t, t), (w, t), (w, -t), (t, -t), (t, -h),
(-t, -h), (-t, -t), (-w, -t), (-w, t)]
# Create shapely polygon for later calculation
crossPolygon = Polygon(crosspts)
crossPolygons = []
for pt in self.align_pts:
crossPolygons.extend([
translateshape(crossPolygon, xoff=pt[0], yoff=pt[1])])
# TODO: Replace these two loops with a single loop, looping over an array of block objects
# TODO: Make the deleting more efficient by using del for multiple indexes?
i_del = []
# Loop over all triangular blocks
for i, tri in enumerate(self.upTris):
for poly in crossPolygons: # Loop over all alignment crosses
if poly.intersects(tri) or poly.within(tri) or poly.contains(
tri):
# If conflict is detected, remove that triangular block
i_del.append(i)
print(('up:' + str(self.upTris[i].centroid.xy)))
self.upTris = [tri for i, tri in enumerate(self.upTris) if i not in i_del]
# Repeat for down-facing triangles
i_del = []
for i, tri in enumerate(self.downTris):
for poly in crossPolygons:
if poly.intersects(tri) or poly.within(tri) or poly.contains(
tri):
# If conflict is detected, remove that triangular block
i_del.append(i)
print(('down:' + str(self.downTris[i].centroid.xy)))
self.downTris = [tri for i, tri in enumerate(self.downTris) if i not in i_del]
# Refresh the centers of the remaining triangles
self.upCenters = [list(zip(*tri.centroid.xy))[0] for tri in self.upTris]
self.downCenters = [list(zip(*tri.centroid.xy))[0] for tri in self.downTris]
for l in layers: # Add marker to all layers
cross = Boundary(crosspts, layer=l) # Create gdsCAD shape
am.add(cross)
mblock = Cell('WAF_ALGN_BLKS')
mblock.add(am)
for pt in self.align_pts:
self.add(mblock, origin=pt)
def add_orientation_text(self, layers):
"""
Create Orientation Label
"""
if not (type(layers) == list): layers = [layers]
tblock = Cell('WAF_ORI_TEXT')
for l in layers:
for (t, pt) in list(self.o_text.items()):
txt = Label(t, 1000, layer=l)
bbox = txt.bounding_box
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(np.array(pt))
tblock.add(txt)
self.add(tblock)
def add_dicing_marks(self, layers, mkWidth=100):
"""
Create dicing marks
"""
if not (type(layers) == list): layers = [layers]
# Define the array and wafer parameters
gap = self.block_gap
wafer_r = self.wafer_r
sl_tri = self.trisize
sl_lattice = sl_tri + gap / np.tan(np.deg2rad(30))
h_lattice = np.sqrt(3.) / 2. * sl_lattice
# Create the lattice of the "up" facing triangles
points = self.createPtLattice(2. * wafer_r, sl_lattice / 2., h_lattice)
points = [np.array(elem) for elem in points]
points = points + np.array(
[-sl_lattice / 2., 0]) # Shift lattice so we can cleave the wafer at y=0
points = points + np.array(self.blockOffset) # Shift by point from MC search (if applicable)
points = [
point
for point in points
if (Point(point).distance(Point(0, 0)) < wafer_r)
]
import pylab as plt
# Plot points
x, y = list(zip(*points))
plt.plot(x, y, 'ko')
# Create a lineshape of the boundary of the circle
c = self.waferShape.boundary
# Create a set (unordered with unique entries)
dicinglines = set()
# For each point in the lattice, create three lines (one along each direction)
for x, y in points:
l0 = LineString([(-4. * wafer_r, y), (4. * wafer_r, y)])
l1 = rotateshape(l0, 60, origin=(x, y))
l2 = rotateshape(l0, -60, origin=(x, y))
# See where these lines intersect the wafer outline
i0 = c.intersection(l0)
i1 = c.intersection(l1)
i2 = c.intersection(l2)
p0s = tuple(map(tuple, np.round((i0.geoms[0].coords[0], i0.geoms[
1].coords[0]))))
p1s = tuple(map(tuple, np.round((i1.geoms[0].coords[0], i1.geoms[
1].coords[0]))))
p2s = tuple(map(tuple, np.round((i2.geoms[0].coords[0], i2.geoms[
1].coords[0]))))
# Add these points to a unique unordered set
dicinglines.add(p0s)
dicinglines.add(p1s)
dicinglines.add(p2s)
# At the end of the loop, the set will contain a list of point pairs which can be used to make the dicing marks
dmarks = Cell('DIC_MRKS')
for l in layers:
for p1, p2 in dicinglines:
dicingline = Path([p1, p2], width=mkWidth, layer=l)
dmarks.add(dicingline)
self.add(dmarks)
return points
def add_wafer_outline(self, layers):
"""
Create Wafer Outline
"""
if not (type(layers) == list): layers = [layers]
outline = Cell('WAF_OLINE')
for l in layers:
circ = Circle((0, 0), self.wafer_r, 100, layer=l)
outline.add(circ)
self.add(outline)
# Gets an optimized list of points where the cells will then be projected within each block
def getCellLattice(self, cellsize=2000):
iterations = self.MCIterations
ycelloffset = self.cell_gap / 3.5 # Arbitrary, change by trial and error
if self.doMCSearch:
best = [0, 0, 0, 0]
# Iterates many times to find the best fit
for i in range(iterations):
# Random seed point
rndpt = (0, np.random.randint(-cellsize, cellsize))
# Make cells around this point
cells = self.makeCells(startpt=rndpt, cellsize=cellsize)
if not cells:
continue
centroidDist = np.array([cell.centroid.xy for cell in cells]).squeeze()
if len(centroidDist.shape) == 2:
centroidDist = centroidDist.mean(0)
if len(cells) > best[1]:
best = [rndpt, len(cells), cells, centroidDist]
elif len(cells) == best[1]:
# Choose the one that is closer to the center of the wafer
if np.sqrt(rndpt[0] ** 2 + rndpt[1] ** 2) < np.sqrt(best[0][0] ** 2 + best[0][1] ** 2):
# if centroidDist < best[3]:
best = [rndpt, len(cells), cells, centroidDist]
# print("Current: {:f}, Best {:f}").format(len(cells),best[1])
# centroidDist = np.array([tri.centroid.xy for tri in cells]).squeeze().mean(0)
# centroidDist = np.sqrt(centroidDist[0]**2+centroidDist[1]**2)
# centroidDist = np.array([cell.centroid.xy for cell in cells]).squeeze()
# Choose the best configuration (fits the most cells and is closest to centroid)
cells = best[2]
else:
cells = self.makeCells(cellsize=2000)
sl_tri = self.trisize
h_tri = np.sqrt(3.) / 2. * sl_tri
gap = self.block_gap
from matplotlib import pyplot
fig = pyplot.figure(1, dpi=90)
ax = fig.add_subplot(111)
ax.grid()
ax.axis('equal')
block = Polygon([
[-sl_tri / 2., -h_tri / 3.], [sl_tri / 2., -h_tri / 3.],
[0, 2. * h_tri / 3.], [-sl_tri / 2., -h_tri / 3.]
])
block = translateshape(block, yoff=h_tri / 3. + gap / 2.)
block = translateshape(block, xoff=self.blockOffset[0],
yoff=self.blockOffset[1]) # TODO: plot output not working properly because of this?
patch = PolygonPatch(block,
facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
# facecolor=RED,
edgecolor=BLACK,
alpha=0.3,
zorder=2)
ax.add_patch(patch)
ax.plot(block.exterior.coords.xy[0], block.exterior.coords.xy[1], 'k-')
for cell in cells:
cell = translateshape(cell, yoff=h_tri / 3. + gap / 2. + ycelloffset)
cell = translateshape(cell, xoff=self.blockOffset[0],
yoff=self.blockOffset[1]) # TODO: plot output not working properly because of this?
patch = PolygonPatch(cell,
facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
edgecolor='k',
alpha=0.3,
zorder=2)
ax.add_patch(patch)
# Convert cells to lattice of points
cellLattice = np.array([list(zip(*cell.centroid.xy))[0] for cell in cells])
cellLattice = cellLattice + np.array([0, ycelloffset])
return cellLattice
# Takes square cells and sees how many can be fit into a triangular block
def makeCells(self, cellsize=2000, startpt=(0, 0)):
gap = self.cell_gap
# Define the parameters of our shapes
if self.cellsAtEdges:
sl_tri = self.trisize * 1.5  # Only needed if you want to put cells very close to the edge of the triangle chip
else:
sl_tri = self.trisize
h_tri = np.sqrt(3.) / 2. * sl_tri
# Create the triangular block
block = Polygon([
[-sl_tri / 2., -h_tri / 3.], [sl_tri / 2., -h_tri / 3.],
[0, 2. * h_tri / 3.], [-sl_tri / 2., -h_tri / 3.]
])
# Make a square cell
cell = box(-cellsize / 2., -cellsize / 2., cellsize / 2., cellsize / 2.)
# Make a lattice for the cells
# lattice = self.createPtLattice(sl_tri, cellsize / 2. + gap / 2.,cellsize/2. + gap)
lattice = self.createPtLattice(sl_tri, (cellsize + gap) / 2., (cellsize + gap) * np.sqrt(3.) / 2.)
lattice = lattice + np.array(startpt)
lattice = [
pt for pt in lattice if Point(pt).within(block)
] # Keep only points within triangular block
# Use the lattice of points to translate the cell all over the block
cells = [translateshape(cell, xoff=x, yoff=y) for x, y in lattice]
# Keep only the cells that are fully within the block
cells = [f for f in cells if f.within(block)]
return cells
def build_and_add_blocks(self):
"""
Create blocks and add them to the wafer Cell
"""
self.upCellLattice = self.getCellLattice(cellsize=2000)
# Create a cell for the triangular blocks
block_up = Cell('upblock')
for x, y in self.upCellLattice:
block_up.add(self.cells, origin=(x, y))
# Take each point in block lattice and make a copy of the block in that location
for x, y in self.upCenters:
self.add(block_up, origin=(x, y))
if self.symmetric_chips:
for x, y in self.downCenters:
self.add(block_up, origin=(x, y), rotation=180)
else:
self.downCellLattice = np.array(self.upCellLattice) * np.array([1, -1])
block_down = Cell('downblock')
for x, y in self.downCellLattice:
block_down.add(self.cells, origin=(x, y))
for x, y in self.downCenters:
self.add(block_down, origin=(x, y))
def plotTriangles(self, tris):
from matplotlib import pyplot
from matplotlib.patches import Circle
fig = pyplot.figure(1, dpi=90)
ax = fig.add_subplot(111)
ax.grid()
# Draw the wafer
circle = Circle(
(0, 0),
self.wafer_r,
facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
edgecolor=BLACK,
alpha=1)
ax.add_patch(circle)
tricenters = [tri.centroid.xy for tri in tris]
x, y = list(zip(*tricenters))
ax.plot(x, y, 'bo')
# Draw all the triangles
for i, item in enumerate(tris):
x, y = item.exterior.coords.xy
ax.plot(x, y, 'k-')
patch = PolygonPatch(item,
facecolor="#{0:0{1}X}".format(np.random.randint(0, 16777215), 6),
edgecolor=BLACK,
alpha=0.5,
zorder=2)
ax.add_patch(patch)
ax.axis('equal')
def makeTriang(self, xs, ys, s, orient):
h = np.sqrt(3.) / 2. * s
ps = []
for x, y in zip(xs, ys):
if orient == "up":
p0 = [x - s / 2., y - h / 3.]
p1 = [x, y + 2. * h / 3.]
p2 = [x + s / 2., y - h / 3.]
else:
p0 = [x - s / 2., y + h / 3.]
p1 = [x, y - 2. * h / 3.]
p2 = [x + s / 2., y + h / 3.]
ps.append(Polygon([p0, p1, p2]))
return ps
def createPtLattice(self, size, xgap, ygap):
G = nx.Graph(directed=False)
G.add_node((0, 0))
for n in range(int(size / min([xgap, ygap]))):
for (q, r) in list(G.nodes()):  # Snapshot the node list; adding edges below creates new nodes
G.add_edge((q, r), (q - xgap, r - ygap))
G.add_edge((q, r), (q + xgap, r + ygap))
G.add_edge((q, r), (q - xgap, r + ygap))
G.add_edge((q, r), (q + xgap, r - ygap))
uniquepts = set(tuple(map(tuple, np.round(list(G.nodes()), 10))))
return list(map(np.array, uniquepts)) # Return only unique points
def makeBlocks(self, trisize, startpt=(0, 0)):
gap = self.block_gap
wafer_r = self.wafer_r
sl_tri = self.trisize # Sidelength of the triangular blocks
h_tri = np.sqrt(3.) / 2. * sl_tri # Height of triangular blocks
sl_lattice = sl_tri + gap / np.tan(
np.deg2rad(30)
) # Sidelength of the block lattice (including the gaps between blocks)
h_lattice = np.sqrt(
3.) / 2. * sl_lattice # Height of the lattice triangles
# Create the lattice of the "up" facing triangles
points = self.createPtLattice(2. * wafer_r, sl_lattice / 2., h_lattice)
points = points + np.array([
0, h_tri / 3. + gap / 2.
]) # Shift lattice so we can cleave the wafer at y=0
points = points + np.array(startpt) # Shift lattice by starting point if doing an MC search
# Create the lattice of "down" facing triangles by shifting previous lattice
points2 = points + np.array([sl_lattice / 2., h_lattice / 3])
x, y = list(zip(*points))
x2, y2 = list(zip(*points2))
tris1 = self.makeTriang(np.array(x), np.array(y), sl_tri, "up")
tris2 = self.makeTriang(np.array(x2), np.array(y2), sl_tri, "down")
wafer = self.waferShape
upTris = [triangle for triangle in tris1 if triangle.within(wafer)]
downTris = [triangle for triangle in tris2 if triangle.within(wafer)]
return upTris, downTris
def _place_blocks(self):
"""
Create the list of valid block sites based on block size and wafer diam.
"""
sl_tri = self.trisize # Sidelength of the triangular blocks
h_tri = np.sqrt(3.) / 2. * sl_tri # Height of triangular blocks
if self.doMCBlockSearch:
best = [0, 0, 0, 0]
# Iterates many times to find the best fit
for i in range(self.MCBlockIterations):
# Random seed point
# rndpt = (np.random.randint(-sl_tri/2., sl_tri/2.), np.random.randint(-h_tri/2., h_tri/2.))
rndpt = (0, np.random.randint(-h_tri, 0))
# Make cells around this point
upTris, downTris = self.makeBlocks(sl_tri, startpt=rndpt)
NTris = (len(upTris) + len(downTris))
if NTris > best[1]:
centroidDist = np.array([tri.centroid.xy for tri in upTris + downTris]).squeeze().mean(0)
centroidDist = np.sqrt(centroidDist[0] ** 2 + centroidDist[1] ** 2)
# centroidDist = abs(rndpt[1])
best = [rndpt, NTris, (upTris, downTris), centroidDist]
elif NTris == best[1]:
# Choose the pattern that is most centered on the wafer
centroidDist = np.array([tri.centroid.xy for tri in upTris + downTris]).squeeze().mean(0)
centroidDist = np.sqrt(centroidDist[0] ** 2 + centroidDist[1] ** 2)
# centroidDist = abs(rndpt[1])
# print centroidDist
if centroidDist < best[3]:
best = [rndpt, NTris, (upTris, downTris), centroidDist]
# print("Current: {:f}, Best {:f}").format(NTris,best[1])
# Choose the best configuration (fits the most cells)
self.upTris, self.downTris = best[2]
self.blockOffset = best[0]
else:
self.upTris, self.downTris = self.makeBlocks(sl_tri)
self.blockOffset = (0, 0)
# Debugging
self.plotTriangles(self.downTris + self.upTris)
# Find the centers of the triangles
self.upCenters = [list(zip(*tri.centroid.xy))[0] for tri in self.upTris]
self.downCenters = [list(zip(*tri.centroid.xy))[0] for tri in self.downTris]
# %%
sl_lattice = self.trisize + self.block_gap / np.tan(np.deg2rad(30))
h_lattice = np.sqrt(3.) / 2. * sl_lattice
base = h_lattice
# Create label for each block (taken from templates._placeblocks)
# String prefixes to associate with each row/column index
x1s, y1s = set(), set()
for tri in self.upTris:
x1s.add(
np.round(tri.centroid.x, 8)
) # In x use centroid as reference, in y use lower bound so up and down triangles give almost the same value
y1s.add(base * round(float(tri.bounds[1]) / base))
# Create dictionary of up and down triangles
self.orientrows = dict(list(zip(y1s, ["up" for i, y in enumerate(y1s)])))
# Create dictionary of up and down triangles
x2s, y2s = set(), set()
for tri in self.downTris:
x2s.add(np.round(tri.centroid.x, 8))
y2s.add(base * round(float(tri.bounds[1]) / base))
self.orientrows.update(dict(list(zip(y2s, ["down" for i, y in enumerate(y2s)
]))))
x1s.update(x2s)
xs = sorted(list(x1s))
self.blockcols = dict(list(zip(xs, [
string.ascii_uppercase[i] for i, x in enumerate(xs)
])))
y1s.update(y2s)
ys = sorted(list(y1s))
self.blockrows = dict(list(zip(ys, [str(i) for i, y in enumerate(ys)])))
# Square cell labels ex: "A", "B", "C"...
def add_cellLabels(self, layers, center=False):
if not (type(layers) == list): layers = [layers]
cellLattice = sorted(self.upCellLattice,
key=itemgetter(1, 0)) # Sort the array first
celllabelsUp = Cell('CellLabelsUp')
h = self.cellsize
vOffsetFactor = 1.
txtSize = 200
for i, pt in enumerate(cellLattice):
cellid = string.ascii_uppercase[i]
celllabel = Cell('LBL_F_' + cellid)
for l in layers:
txt = Label(cellid, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(offset) # Translate it to bottom of wafer
celllabel.add(txt)
if center:
celllabelsUp.add(celllabel) # Middle of cell
else:
celllabelsUp.add(celllabel, origin=(
0, -h / 2. * vOffsetFactor + np.mean(bbox, 0)[1])) # Bottom of cell
for tri in self.upTris:
self.add(celllabelsUp, origin=tri.centroid)
cellLattice = sorted(self.downCellLattice,
key=itemgetter(1, 0),
reverse=True)
celllabelsDown = Cell('CellLabelsDown')
h = self.cellsize
for i, pt in enumerate(cellLattice):
cellid = string.ascii_uppercase[i]
celllabel = Cell('LBL_F_' + cellid)
for l in layers:
txt = Label(cellid, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
if self.symmetric_chips:
txt.rotate(180)
txt.translate(offset) # Translate it to bottom of wafer
celllabel.add(txt)
if center:
celllabelsDown.add(celllabel) # Middle of cell
else:
celllabelsDown.add(celllabel,
origin=(0, -h / 2. * vOffsetFactor + np.mean(bbox, 0)[1])) # Bottom of cell
for tri in self.downTris:
self.add(celllabelsDown, origin=tri.centroid)
# Triangular block labels ex: "A0", "B0", "C0"...
def add_blockLabels(self, layers, center=False):
if not (type(layers) == list): layers = [layers]
vOffsetFactor = 1.
blocklabelsUp = Cell('BlockLabelsUp')
h = self.upTris[0].bounds[3] - self.upTris[0].bounds[1]
sl_lattice = self.trisize + self.block_gap / np.tan(np.deg2rad(30))
h_lattice = np.sqrt(3.) / 2. * sl_lattice
base = h_lattice
for tri in self.upTris:
lbl_col = self.blockcols[np.round(tri.centroid.x, 8)]
lbl_row = self.blockrows[base * round(float(tri.bounds[1]) / base)]
blockid = str(lbl_col) + str(lbl_row)
blocklabel = Cell('LBL_B_' + blockid)
for l in layers:
txt = Label(blockid, 1000, layer=l)
bbox = txt.bounding_box
offset = np.array(tri.centroid)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(offset) # Translate it to bottom of wafer
blocklabel.add(txt)
blocklabelsUp.add(blocklabel)
if center:
self.add(blocklabelsUp)
else:
self.add(blocklabelsUp, origin=(0, h / 2. * vOffsetFactor))
blocklabelsDown = Cell('BlockLabelsDown')
for tri in self.downTris:
lbl_col = self.blockcols[np.round(tri.centroid.x, 8)]
lbl_row = self.blockrows[base * round(float(tri.bounds[1]) / base)]
blockid = str(lbl_col) + str(lbl_row)
blocklabel = Cell('LBL_' + blockid)
for l in layers:
txt = Label(blockid, 1000, layer=l)
bbox = txt.bounding_box
offset = np.array(tri.centroid)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
if self.symmetric_chips:
txt.rotate(180)
txt.translate(offset) # Translate it to bottom of wafer
blocklabel.add(txt)
blocklabelsDown.add(blocklabel)
if center:
self.add(blocklabelsDown)
else:
self.add(blocklabelsDown, origin=(0, -h / 2. * vOffsetFactor))
def add_sub_dicing_ticks(self, length, thickness, layers):
if not (type(layers) == list): layers = [layers]
l = layers[0]
_h = self.upTris[0].bounds[3] - self.upTris[0].bounds[1]
_w = self.upTris[0].bounds[2] - self.upTris[0].bounds[0]
y_bottom = self.upTris[0].bounds[1]
y_centroid = self.upTris[0].centroid.y
offset = y_centroid - y_bottom
mark = Path([(0, 0), (0, -length)], width=thickness, layer=l)
mark_cell = Cell('SubDicingTick')
mark_cell.add(mark)
tri_sub_dMarks = Cell('TriSubDMarks')
tri_sub_dMarks.add(mark_cell, rotation=30, origin=(0, offset))
tri_sub_dMarks.add(mark_cell, rotation=-30, origin=(0, offset))
tri_sub_dMarks.add(mark_cell, rotation=30, origin=(_w / 4., offset - _h / 2.))
tri_sub_dMarks.add(mark_cell, rotation=90, origin=(_w / 4., offset - _h / 2.))
tri_sub_dMarks.add(mark_cell, rotation=-30, origin=(-_w / 4., offset - _h / 2.))
tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(-_w / 4., offset - _h / 2.))
# Horizontal marks
# This is a mess... should fix it later. Past Martin says sorry...
tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(_w * 3. / 8. - 300., offset - _h / 4. - _h / 20.))
tri_sub_dMarks.add(mark_cell, rotation=90, origin=(-_w * 3. / 8. + 300., offset - _h / 4. - _h / 20.))
tri_sub_dMarks.add(mark_cell, rotation=-90, origin=(_w * 1. / 8. + 300., offset - _h * 3. / 4. + _h / 20.))
tri_sub_dMarks.add(mark_cell, rotation=90, origin=(-_w * 1. / 8. - 300., offset - _h * 3. / 4. + _h / 20.))
for tri in self.downTris:
tri_center = np.array(tri.centroid)
self.add(tri_sub_dMarks, origin=tri_center)
for tri in self.upTris:
tri_center = np.array(tri.centroid)
self.add(tri_sub_dMarks, origin=tri_center, rotation=180)
def add_waferLabel(self, label, layers, pos=None):
"""
Create a label
"""
if not (type(layers) == list): layers = [layers]
if self._label is None:
self._label = Cell(self.name + '_LBL')
self.add(self._label)
else:
self._label.elements = []
offset = np.array([0, -self.wafer_r + self.block_gap]) if pos is None else np.array(pos)
labelsize = 1000.
for l in layers:
txt = LineLabel(label, labelsize, style='romand', line_width=labelsize / 20., layer=l)
bbox = txt.bounding_box
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(offset) # Translate it to bottom of wafer
self._label.add(txt)
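# --- Hedged usage sketch (added for illustration; not in the original file) ---
# Rough outline of how the class above could be driven. It assumes the bundled
# gdsCAD_v045 package (and its shapely/descartes/networkx/matplotlib
# dependencies) is importable, and uses a placeholder 100x100 um device cell.
# Parameter values are illustrative, not a tested recipe.
def _demo_wafer_triang_style():
    device = Cell('DEMO_DEVICE')
    device.add(Boundary([(0., 0.), (100., 0.), (100., 100.), (0., 100.)], layer=1))
    wafer = Wafer_TriangStyle('DEMO_WAFER', cells=[device],
                              wafer_r=25.5e3, trisize=10e3, cellsize=2e3,
                              block_gap=1200., cell_gap=200.,
                              doMCSearch=False, doMCBlockSearch=False)
    wafer._place_blocks()          # lay out the triangular blocks on the wafer
    wafer.build_and_add_blocks()   # tile the device cell inside each block
    wafer.add_wafer_outline([1])   # draw the wafer outline on layer 1
    return wafer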
# TODO: create a square cell helper class? Do not confuse with Cell class of gdsCAD
class Blocks(Cell):
"""
Block object in the form of a triangle, facing either up or down
"""
# TODO: add the inner and outer (triangle+gap) polygons to this block for easier use later
def __init__(self, side_len, orient, name, center=[0., 0.]):
super(Blocks, self).__init__(name)
self.center = center
self.orient = orient
self.side_len = side_len
self.height = np.sqrt(3.) / 2. * side_len
self.ptList = self.calcPts()
self.polygon = Polygon(self.ptList)
def calcPts(self):
x, y = self.center
h = self.height
s = self.side_len
if self.orient == "up":
p0 = [x - s / 2., y - h / 3.]
p1 = [x, y + 2. * h / 3.]
p2 = [x + s / 2., y - h / 3.]
else:
p0 = [x - s / 2., y + h / 3.]
p1 = [x, y - 2. * h / 3.]
p2 = [x + s / 2., y + h / 3.]
ptsList = [p0, p1, p2]
return ptsList
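# --- Hedged illustration (added; not in the original file) ---
# Builds a single up-facing block of 10 mm side length centred at the origin;
# its shapely outline is exposed as .polygon for geometric tests.
def _demo_block():
    blk = Blocks(10e3, 'up', 'DEMO_BLOCK')
    return blk.polygon.area  # area of the triangular outline, in um^2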
| gpl-3.0 |
siutanwong/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
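# --- Hedged usage sketch (added; not part of the scikit-learn module) ---
# Exercises both branches of fit(): the dense path (np.var) and the sparse
# path (mean_variance_axis). Assumes scipy is available, as scikit-learn requires.
if __name__ == '__main__':
    from scipy.sparse import csr_matrix
    X_demo = np.array([[0., 2., 0., 3.], [0., 1., 4., 3.], [0., 1., 1., 3.]])
    print(VarianceThreshold().fit(X_demo).variances_)              # dense input
    print(VarianceThreshold().fit(csr_matrix(X_demo)).variances_)  # sparse input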
| bsd-3-clause |
guilgautier/DPPy | dppy/multivariate_jacobi_ope.py | 1 | 36593 | # coding: utf8
""" Implementation of the class :class:`MultivariateJacobiOPE` used in :cite:`GaBaVa19` for Monte Carlo with Determinantal Point Processes
It has 3 main methods:
- :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.sample` to generate samples
- :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.K` to evaluate the corresponding projection kernel
- :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.plot` to display 1D or 2D samples
"""
import numpy as np
import itertools as itt
from scipy import stats
from scipy.special import beta, betaln, factorial, gamma, gammaln
from scipy.special import eval_jacobi
# from scipy.special import logsumexp
import matplotlib.pyplot as plt
from dppy.random_matrices import mu_ref_beta_sampler_tridiag as tridiagonal_model
from dppy.utils import check_random_state, inner1d
class MultivariateJacobiOPE:
"""
Multivariate Jacobi Orthogonal Polynomial Ensemble used in :cite:`GaBaVa19` for Monte Carlo with Determinantal Point Processes
This corresponds to a continuous multivariate projection DPP with state space :math:`[-1, 1]^d` with respect to
- reference measure :math:`\\mu(dx) = w(x) dx` (see also :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.eval_w`), where
.. math::
w(x) = \\prod_{i=1}^{d} (1-x_i)^{a_i} (1+x_i)^{b_i}
- kernel :math:`K` (see also :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.K`)
.. math::
K(x, y) = \\sum_{\\mathfrak{b}(k)=0}^{N-1}
P_{k}(x) P_{k}(y)
= \\Phi(x)^{\\top} \\Phi(y)
where
- :math:`k \\in \\mathbb{N}^d` is a multi-index ordered according to the ordering :math:`\\mathfrak{b}` (see :py:meth:`compute_ordering`)
- :math:`P_{k}(x) = \\prod_{i=1}^d P_{k_i}^{(a_i, b_i)}(x_i)` is the product of orthonormal Jacobi polynomials
.. math::
\\int_{-1}^{1}
P_{k}^{(a_i,b_i)}(u) P_{\\ell}^{(a_i,b_i)}(u)
(1-u)^{a_i} (1+u)^{b_i} d u
= \\delta_{k\\ell}
so that :math:`(P_{k})` are orthonormal w.r.t :math:`\\mu(dx)`
- :math:`\\Phi(x) = \\left(P_{\\mathfrak{b}^{-1}(0)}(x), \\dots, P_{\\mathfrak{b}^{-1}(N-1)}(x) \\right)^{\\top}`
:param N:
Number of points :math:`N \\geq 1`
:type N:
int
:param jacobi_params:
Jacobi parameters :math:`[(a_i, b_i)]_{i=1}^d`
The number of rows :math:`d` prescribes the ambient dimension of the points i.e. :math:`x_{1}, \\dots, x_{N} \\in [-1, 1]^d`.
- when :math:`d=1`, :math:`a_1, b_1 > -1`
- when :math:`d \\geq 2`, :math:`|a_i|, |b_i| \\leq \\frac{1}{2}`
:type jacobi_params:
array_like
.. seealso::
- :ref:`multivariate_jacobi_ope`
- when :math:`d=1`, the :ref:`univariate Jacobi ensemble <jacobi_banded_matrix_model>` is sampled by computing the eigenvalues of a properly randomized :ref:`tridiagonal matrix <jacobi_banded_matrix_model>` of :cite:`KiNe04`
- :cite:`BaHa16` initiated the use of the multivariate Jacobi ensemble for Monte Carlo integration. In particular, they proved CLT with variance decay of order :math:`N^{-(1+1/d)}` which is faster that the :math:`N^{-1}` rate of vanilla Monte Carlo where the points are drawn i.i.d. from the base measure.
"""
def __init__(self, N, jacobi_params):
self.N, self.jacobi_params, self.dim =\
self._check_params(N, jacobi_params)
self.ordering = compute_ordering(self.N, self.dim)
self.deg_max, self.degrees_1D_polynomials =\
compute_degrees_1D_polynomials(np.max(self.ordering, axis=0))
self.norms_1D_polynomials =\
compute_norms_1D_polynomials(self.jacobi_params, self.deg_max)
self.square_norms_multiD_polynomials =\
np.prod((self.norms_1D_polynomials**2)[self.ordering,
range(self.dim)],
axis=1)
self.mass_of_mu = self.square_norms_multiD_polynomials[0]
self.rejection_bounds =\
compute_rejection_bounds(self.jacobi_params,
self.ordering,
log_scale=True)
def _check_params(self, N, jacobi_params):
""" Check that:
- The number of points :math:`N \\geq 1`
- Jacobi parameters
- when :math:`d=1` we must have :math:`a_1, b_1 > -1`
- when :math:`d \\geq 2` we must have :math:`|a_i|, |b_i| \\leq \\frac{1}{2}`.
"""
if type(N) is not int or N < 1:
raise ValueError('Number of points N={} < 1'.format(N))
dim = jacobi_params.size // 2
if dim == 1 and not np.all(jacobi_params > -1):
raise ValueError('d=1, Jacobi parameters must be > -1')
elif dim >= 2 and not np.all(np.abs(jacobi_params) <= 0.5):
raise ValueError('d={}, Jacobi parameters be in [-0.5, 0.5]^d'.format(dim))
return N, jacobi_params, dim
def eval_w(self, X):
"""Evaluate :math:`w(x) = \\prod_{i=1}^{d} (1-x_i)^{a_i} (1+x_i)^{b_i}` which corresponds to the density of the base measure :math:`\\mu(dx) = w(x) dx`
:param X:
:math:`M\\times d` array of :math:`M` points :math:`\\in [-1, 1]^d`
:type X:
array_like
:return:
:math:`w(x) = \\prod_{i=1}^{d} (1-x_i)^{a_i} (1+x_i)^{b_i}`
:rtype:
array_like
"""
a, b = self.jacobi_params.T
return np.prod((1.0 - X)**a * (1.0 + X)**b, axis=-1)
def eval_multiD_polynomials(self, X):
"""Evaluate
.. math::
\\mathbf{\\Phi}(X)
:= \\begin{pmatrix}
\\Phi(x_1)^{\\top}\\\\
\\vdots\\\\
\\Phi(x_M)^{\\top}
\\end{pmatrix}
where :math:`\\Phi(x) = \\left(P_{\\mathfrak{b}^{-1}(0)}(x), \\dots, P_{\\mathfrak{b}^{-1}(N-1)}(x) \\right)^{\\top}` such that
:math:`K(x, y) = \\Phi(x)^{\\top} \\Phi(y)`.
Recall that :math:`\\mathfrak{b}` denotes the ordering chosen to order multi-indices :math:`k\\in \\mathbb{N}^d`.
This is done by evaluating each of the `three-term recurrence relations <https://en.wikipedia.org/wiki/Jacobi_polynomials#Recurrence_relations>`_ satisfied by the univariate orthogonal Jacobi polynomials, using the dedicated SciPy routine :func:`scipy.special.eval_jacobi` (`see the SciPy docs <https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.eval_jacobi.html>`_) to evaluate the respective univariate Jacobi polynomials :math:`P_{k_i}^{(a_i, b_i)}(x_i)`.
Then we use the slicing feature of the Python language to compute :math:`\\Phi(x)=\\left(P_{k}(x) = \\prod_{i=1}^d P_{k_i}^{(a_i, b_i)}(x_i)\\right)_{k=\\mathfrak{b}^{-1}(0), \\dots, \\mathfrak{b}^{-1}(N-1)}^{\\top}`
:param X:
:math:`M\\times d` array of :math:`M` points :math:`\\in [-1, 1]^d`
:type X:
array_like
:return:
:math:`\\mathbf{\\Phi}(X)` - :math:`M\\times N` array
:rtype:
array_like
.. seealso::
- evaluation of the kernel :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.K`
"""
poly_1D_jacobi = eval_jacobi(self.degrees_1D_polynomials,
self.jacobi_params[:, 0],
self.jacobi_params[:, 1],
np.atleast_2d(X)[:, None])\
/ self.norms_1D_polynomials
return np.prod(poly_1D_jacobi[:, self.ordering, range(self.dim)],
axis=2)
def K(self, X, Y=None, eval_pointwise=False):
"""Evalute :math:`\\left(K(x, y)\\right)_{x\\in X, y\\in Y}` if ``eval_pointwise=False`` or :math:`\\left(K(x, y)\\right)_{(x, y)\\in (X, Y)}` otherwise
.. math::
K(x, y) = \\sum_{\\mathfrak{b}(k)=0}^{N-1}
P_{k}(x) P_{k}(y)
= \\phi(x)^{\\top} \\phi(y)
where
- :math:`k \\in \\mathbb{N}^d` is a multi-index ordered according to the ordering :math:`\\mathfrak{b}`, :py:meth:`compute_ordering`
- :math:`P_{k}(x) = \\prod_{i=1}^d P_{k_i}^{(a_i, b_i)}(x_i)` is the product of orthonormal Jacobi polynomials
.. math::
\\int_{-1}^{1}
P_{k}^{(a_i,b_i)}(u) P_{\\ell}^{(a_i,b_i)}(u)
(1-u)^{a_i} (1+u)^{b_i} d u
= \\delta_{k\\ell}
so that :math:`(P_{k})` are orthonormal w.r.t :math:`\\mu(dx)`
- :math:`\\Phi(x) = \\left(P_{\\mathfrak{b}^{-1}(0)}(x), \\dots, P_{\\mathfrak{b}^{-1}(N-1)}(x) \\right)`, see :py:meth:`eval_multiD_polynomials`
:param X:
:math:`M\\times d` array of :math:`M` points :math:`\\in [-1, 1]^d`
:type X:
array_like
:param Y:
:math:`M'\\times d` array of :math:`M'` points :math:`\\in [-1, 1]^d`
:type Y:
array_like (default None)
:param eval_pointwise:
sets pointwise evaluation of the kernel, if ``True``, :math:`X` and :math:`Y` must have the same shape, see Returns
:type eval_pointwise:
bool (default False)
:return:
If ``eval_pointwise=False`` (default), evaluate the kernel matrix
.. math::
\\left(K(x, y)\\right)_{x\\in X, y\\in Y}
If ``eval_pointwise=True``, pointwise evaluation of :math:`K` as depicted in the following pseudo code output
- if ``Y`` is ``None``
- :math:`\\left(K(x, y)\\right)_{x\\in X, y\\in X}` if ``eval_pointwise=False``
- :math:`\\left(K(x, x)\\right)_{x\\in X}` if ``eval_pointwise=True``
- otherwise
- :math:`\\left(K(x, y)\\right)_{x\\in X, y\\in Y}` if ``eval_pointwise=False``
- :math:`\\left(K(x, y)\\right)_{(x, y)\\in (X, Y)}` if ``eval_pointwise=True`` (in this case X and Y should have the same shape)
:rtype:
array_like
.. seealso::
:py:meth:`eval_multiD_polynomials`
"""
X = np.atleast_2d(X)
if Y is None or Y is X:
phi_X = self.eval_multiD_polynomials(X)
if eval_pointwise:
return inner1d(phi_X, phi_X, axis=1)
else:
return phi_X.dot(phi_X.T)
else:
len_X = len(X)
phi_XY = self.eval_multiD_polynomials(np.vstack((X, Y)))
if eval_pointwise:
return inner1d(phi_XY[:len_X], phi_XY[len_X:], axis=1)
else:
return phi_XY[:len_X].dot(phi_XY[len_X:].T)
def sample_chain_rule_proposal(self, nb_trials_max=10000,
random_state=None):
"""Use a rejection sampling mechanism to sample
.. math::
\\frac{1}{N} K(x, x) w(x) dx
= \\frac{1}{N}
\\sum_{\\mathfrak{b}(k)=0}^{N-1}
\\left( \\frac{P_k(x)}{\\left\\| P_k \\right\\|} \\right)^2
w(x)
with proposal distribution
.. math::
w_{eq}(x) d x
= \\prod_{i=1}^{d}
\\frac{1}{\\pi\\sqrt{1-(x_i)^2}}
d x_i
Since the target density is a mixture, we can sample from it by
1. Select a multi-index :math:`k` uniformly at random in :math:`\\left\\{ \\mathfrak{b}^{-1}(0), \\dots, \\mathfrak{b}^{-1}(N-1) \\right\\}`
2. Sample from :math:`\\left( \\frac{P_k(x)}{\\left\\| P_k \\right\\|} \\right)^2 w(x) dx` with proposal :math:`w_{eq}(x) d x`.
The acceptance ratio writes
.. math::
\\frac{
\\left( \\frac{P_k(x)}{\\left\\| P_k \\right\\|} \\right)^2
w(x)}
{w_{eq}(x)}
= \\prod_{i=1}^{d}
\\pi
\\left(
\\frac{P_{k_i}^{(a_i, b_i)}(x)}
{\\left\\| P_{k_i}^{(a_i, b_i)} \\right\\|}
\\right)^2
(1-x_i)^{a_i+\\frac{1}{2}}
(1+x_i)^{b_i+\\frac{1}{2}}
\\leq C_{k}
which can be bounded using the result of :cite:`Gau09` on Jacobi polynomials.
.. note::
Each of the rejection constant :math:`C_{k}` is computed at initialization of the :py:class:`MultivariateJacobiOPE` object using :py:meth:`compute_rejection_bounds`
:return:
A sample :math:`x\\in[-1,1]^d` with probability distribution :math:`\\frac{1}{N} K(x,x) w(x)`
:rtype:
array_like
.. seealso::
- :py:meth:`compute_rejection_bounds`
- :py:meth:`sample`
"""
rng = check_random_state(random_state)
a, b = self.jacobi_params.T
a_05, b_05 = a + 0.5, b + 0.5
d = self.dim
ind = rng.randint(self.N)
k = self.ordering[ind]
Pk_square_norm = self.square_norms_multiD_polynomials[ind]
# norm_Pk = self.poly_multiD_norm[ind]
rejection_bound = self.rejection_bounds[ind]
for trial in range(nb_trials_max):
# Propose x ~ w_eq(x) = \prod_{i=1}^{d} 1/pi 1/sqrt(1-(x_i)^2)
# rng.beta is defined as beta(a, b) = x^(a-1) (1-x)^(b-1)
x = 1.0 - 2.0 * rng.beta(0.5, 0.5, size=self.dim)
# Compute (P_k(x)/||P_k||)^2
Pk2_x = np.prod(eval_jacobi(k, a, b, x))**2 / Pk_square_norm
# Pk2_x = (np.prod(eval_jacobi(k, a, b, x)) / norm_Pk)**2
# Compute w(x) / w_eq(x)
w_over_w_eq =\
np.pi**d * np.prod((1.0 - x)**a_05 * (1.0 + x)**b_05)
if rng.rand() * rejection_bound < Pk2_x * w_over_w_eq:
break
else:
print('marginal distribution 1/N K(x,x), rejection fails after {} proposals'.format(trial))
return x
def sample(self, nb_trials_max=10000, random_state=None, tridiag_1D=True):
"""Use the chain rule :cite:`HKPV06` (Algorithm 18) to sample :math:`\\left(x_{1}, \\dots, x_{N} \\right)` with density
.. math::
& \\frac{1}{N!}
\\left(K(x_n,x_p)\\right)_{n,p=1}^{N}
\\prod_{n=1}^{N} w(x_n)\\\\
&= \\frac{1}{N} K(x_1,x_1) w(x_1)
\\prod_{n=2}^{N}
\\frac{
K(x_n,x_n)
- K(x_n,x_{1:n-1})
\\left[\\left(K(x_k,x_l)\\right)_{k,l=1}^{n-1}\\right]^{-1}
K(x_{1:n-1},x_n)
}{N-(n-1)}
w(x_n)\\\\
&= \\frac{\\| \\Phi(x) \\|^2}{N} \\omega(x_1) d x_1
\\prod_{n=2}^{N}
\\frac{\\operatorname{distance}^2(\\Phi(x_n), \\operatorname{span}\\{\\Phi(x_p)\\}_{p=1}^{n-1})}
{N-(n-1)}
\\omega(x_n) d x_n
The order in which the points were sampled can be forgotten to obtain a valid sample of the corresponding DPP
- :math:`x_1 \\sim \\frac{1}{N} K(x,x) w(x)` using :py:meth:`sample_chain_rule_proposal`
- :math:`x_n | Y = \\left\\{ x_{1}, \\dots, x_{n-1} \\right\\}`, is sampled using rejection sampling with proposal density :math:`\\frac{1}{N} K(x,x) w(x)` and rejection bound :math:`\\frac{N}{N-(n-1)}`
.. math::
\\frac{1}{N-(n-1)} [K(x,x) - K(x, Y) K_Y^{-1} K(Y, x)] w(x)
\\leq \\frac{N}{N-(n-1)} \\frac{1}{N} K(x,x) w(x)
.. note::
Using the gram structure :math:`K(x, y) = \\Phi(x)^{\\top} \\Phi(y)` the numerator of the successive conditionals reads
.. math::
K(x, x) - K(x, Y) K(Y, Y)^{-1} K(Y, x)
&= \\operatorname{distance}^2(\\Phi(x_n), \\operatorname{span}\\{\\Phi(x_p)\\}_{p=1}^{n-1})\\\\
&= \\left\\| (I - \\Pi_{\\operatorname{span}\\{\\Phi(x_p)\\}_{p=1}^{n-1}} \\phi(x)\\right\\|^2
which can be computed simply in a vectorized way.
The overall procedure is akin to a sequential Gram-Schmidt orthogonalization of :math:`\\Phi(x_{1}), \\dots, \\Phi(x_{N})`.
.. seealso::
- :ref:`continuous_dpps_exact_sampling_projection_dpp_chain_rule`
- :py:meth:`sample_chain_rule_proposal`
"""
rng = check_random_state(random_state)
if self.dim == 1 and tridiag_1D:
sample = tridiagonal_model(a=self.jacobi_params[0, 0] + 1,
b=self.jacobi_params[0, 1] + 1,
size=self.N,
random_state=rng)[:, None]
return 1.0 - 2.0 * sample
sample = np.zeros((self.N, self.dim))
phi = np.zeros((self.N, self.N))
for n in range(self.N):
for trial in range(nb_trials_max):
# Propose a point ~ 1/N K(x,x) w(x)
sample[n] = self.sample_chain_rule_proposal(random_state=rng)
# Schur complement (numerator of x_n | Y = x_1:n-1)
# = K(x, x) - K(x, Y) K(Y, Y)^-1 K(Y, x)
# = ||(I - Proj{phi(Y)}) phi(x)||^2
phi[n] = self.eval_multiD_polynomials(sample[n])
K_xx = phi[n].dot(phi[n]) # self.K(sample[n], sample[n])
phi[n] -= phi[:n].dot(phi[n]).dot(phi[:n])
schur = phi[n].dot(phi[n])
# accept: x_n = x, or reject
if rng.rand() < schur / K_xx:
# normalize phi(x_n) / ||phi(x_n)||
phi[n] /= np.sqrt(schur)
break
else:
print('conditional x_{} | x_1,...,x_{}, rejection fails after {} proposals'.format(n + 1, n, trial))
return sample
def plot(self, sample, weighted=''):
if self.dim >= 3:
raise NotImplementedError('Visualizations in d>=3 not implemented')
tols = np.zeros_like(self.jacobi_params)
if self.dim == 2:
    tols[1, 0] = 8e-2
weights = np.ones(len(sample))
if weighted == 'BH':
# w_n = 1 / K(x_n, x_n)
weights = 1. / self.K(sample, eval_pointwise=True)
elif weighted == 'EZ':
Phi_X = self.eval_multiD_polynomials(sample)
idx = np.tile(np.arange(self.N), (self.N, 1))
idx = idx[~np.eye(idx.shape[0], dtype=bool)].reshape(self.N, -1)
# w_n = +/- c det A / det B
# = +/- c sgn(det A) sgn(det B) exp(logdet A − logdet B)
sgn_det_A, log_det_A = np.array(np.linalg.slogdet(Phi_X[idx, 1:]))
sgn_det_B, log_det_B = np.linalg.slogdet(Phi_X)
np.exp(log_det_A - log_det_B, out=weights)
weights *= sgn_det_A * sgn_det_B
weights[1::2] *= -1
weights /= max(weights.min(), weights.max(), key=abs)
ticks_pos = [-1, 0, 1]
ticks_labs = list(map(str, ticks_pos))
if self.dim == 1:
fig, ax_main = plt.subplots(figsize=(6, 4))
ax_main.tick_params(axis='both', which='major', labelsize=18)
ax_main.set_xticks(ticks_pos)
ax_main.set_xticklabels(ticks_labs)
ax_main.spines['right'].set_visible(False)
ax_main.spines['top'].set_visible(False)
ax_main.scatter(sample[:, 0],
np.zeros_like(sample[:, 0]),
s=weights)
ax_main.hist(sample[:, 0],
bins=10,
weights=weights,
density=True,
orientation='vertical',
alpha=0.5)
# Top densities
X_ = np.linspace(-1 + tols[0, 1], 1 - tols[0, 0], 200)[:, None]
ax_main.plot(X_,
0.5 * stats.beta(*(1 + self.jacobi_params[0])).pdf(0.5 * (1 - X_)),
ls='--', c='red', lw=3, alpha=0.7,
label=r'$a_1 = {:.2f}, b_1 = {:.2f}$'.format(*self.jacobi_params[0]))
x_lim = ax_main.get_xlim()
y_lim = ax_main.get_ylim()
if not weighted:
tol = 5e-2
X_ = np.linspace(-1 + tol, 1 - tol, 200)[:, None]
ax_main.plot(X_,
0.5 * stats.beta(0.5, 0.5).pdf(0.5 * (1 - X_)),
c='orange', ls='-', lw=3,
label=r'$a = b = -0.5$')
ax_main.legend(fontsize=15,
loc='center',
bbox_to_anchor=(0.5, -0.15 if weighted else -0.17),
labelspacing=0.1,
frameon=False)
elif self.dim == 2:
# Create Fig and gridspec
fig = plt.figure(figsize=(6, 6))
grid = plt.GridSpec(6, 6, hspace=0., wspace=0.)
ax_main = fig.add_subplot(grid[1:, :-1],
xticks=ticks_pos, xticklabels=ticks_labs,
yticks=ticks_pos, yticklabels=ticks_labs)
ax_main.tick_params(axis='both', which='major', labelsize=18)
if weighted == 'EZ':
weights *= 100
w_geq_0 = weights >= 0
ax_main.scatter(sample[w_geq_0, 0],
sample[w_geq_0, 1],
s=weights[w_geq_0], alpha=0.7)
ax_main.scatter(sample[~w_geq_0, 0],
sample[~w_geq_0, 1],
s=-weights[~w_geq_0], alpha=0.7)
else:
weights *= 20
ax_main.scatter(sample[:, 0],
sample[:, 1],
s=weights, alpha=0.8)
x_lim = ax_main.get_xlim()
y_lim = ax_main.get_ylim()
# Top plot
ax_top = fig.add_subplot(grid[0, :-1],
xticks=ticks_pos, xticklabels=[],
yticks=[], yticklabels=[],
frameon=False)
ax_top.set_xlim(x_lim)
# Top histogram
ax_top.hist(sample[:, 0],
bins=10,
weights=np.abs(weights),
density=True,
orientation='vertical',
alpha=0.5)
# Top densities
X_ = np.linspace(-1 + tols[0, 1], 1 - tols[0, 0], 200)[:, None]
l_top, = ax_top.plot(X_,
0.5 * stats.beta(*(1 + self.jacobi_params[0])).pdf(0.5 * (1 - X_)),
ls='--', c='red', lw=3, alpha=0.7)
# Right plot
ax_right = fig.add_subplot(grid[1:, -1],
xticks=[], xticklabels=[],
yticks=ticks_pos, yticklabels=[],
frameon=False)
ax_right.set_ylim(y_lim)
# Right histogram
ax_right.hist(sample[:, 1],
bins=10,
weights=np.abs(weights),
density=True,
orientation='horizontal',
alpha=0.5)
# Right densities
X_ = np.linspace(-1 + tols[1, 1], 1 - tols[1, 0], 200)[:, None]
l_right, = ax_right.plot(0.5 * stats.beta(*(1 + self.jacobi_params[1])).pdf(0.5 * (1 - X_)),
X_,
ls='--', c='green', lw=3, alpha=0.7)
leg_axes = [l_top, l_right]
leg_text = [', '.join([r'$a_{} = {:.2f}$'.format(i+1, jac_par[0]),
r'$b_{} = {:.2f}$'.format(i+1, jac_par[1])])
for i, jac_par in enumerate(self.jacobi_params)]
if not weighted:
tol = 5e-2
X_ = np.linspace(-1 + tol, 1 - tol, 200)[:, None]
l_arcsine, = ax_top.plot(X_,
0.5 * stats.beta(0.5, 0.5).pdf(0.5 * (1 - X_)),
c='orange', ls='-', lw=3)
ax_right.plot(0.5 * stats.beta(0.5, 0.5).pdf(0.5 * (1 - X_)),
X_,
c='orange', ls='-', lw=3)
leg_axes.append(l_arcsine)
leg_text.append(r'$a = b = -0.5$')
ax_main.legend(leg_axes,
leg_text,
fontsize=15,
loc='center',
bbox_to_anchor=(0.5, -0.15 if weighted else -0.18),
labelspacing=0.1,
frameon=False)
def compute_ordering(N, d):
""" Compute the ordering of the multi-indices :math:`\\in\\mathbb{N}^d` defining the order between the multivariate monomials as described in Section 2.1.3 of :cite:`BaHa16`.
:param N:
Number of polynomials :math:`(P_k)` considered to build the kernel :py:meth:`~dppy.multivariate_jacobi_ope.MultivariateJacobiOPE.K` (number of points of the corresponding :py:class:`MultivariateJacobiOPE`)
:type N:
int
:param d:
Size of the multi-indices :math:`k\\in \\mathbb{N}^d` characterizing the *degree* of :math:`P_k` (ambient dimension of the points :math:`x_{1}, \\dots, x_{N} \\in [-1, 1]^d`)
:type d:
int
:return:
Array of size :math:`N\\times d` containing the first :math:`N` multi-indices :math:`\\in\\mathbb{N}^d` in the order prescribed by the ordering :math:`\\mathfrak{b}` :cite:`BaHa16` Section 2.1.3
:rtype:
array_like
For instance, for :math:`N=12, d=2`
.. code:: python
[(0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (2, 0), (2, 1), (2, 2), (0, 3), (1, 3), (2, 3)]
.. seealso::
- :cite:`BaHa16` Section 2.1.3
"""
layer_max = np.floor(N**(1.0 / d)).astype(np.int16)
ordering = itt.chain.from_iterable(
filter(lambda x: m in x,
itt.product(range(m + 1), repeat=d))
for m in range(layer_max + 1))
return list(ordering)[:N]
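# --- Hedged illustration (added; not in the original module) ---
# Reproduces the N=12, d=2 ordering listed in the docstring above.
def _demo_compute_ordering():
    expected = [(0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2),
                (2, 0), (2, 1), (2, 2), (0, 3), (1, 3), (2, 3)]
    assert compute_ordering(12, 2) == expected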
def compute_norms_1D_polynomials(jacobi_params, deg_max):
""" Compute the square norms :math:`\\|P_{k}^{(a_i,b_i)}\\|^2` of each (univariate) orthogoanl Jacobi polynomial for :math:`k=0` to ``deg_max`` and :math:`a_i, b_i =` ``jacobi_params[i, :]``
Recall that the Jacobi polynomials :math:`\\left( P_{k}^{(a_i,b_i)} \\right)` are `orthogonal <http://en.wikipedia.org/wiki/Jacobi_polynomials#Orthogonality>`_ w.r.t. :math:`(1-u)^{a_i} (1+u)^{b_i} du`.
.. math::
\\|P_{k}^{(a_i,b_i)}\\|^2
&= \\int_{-1}^{1}
\\left( P_{k}^{(a_i,b_i)}(u) \\right)^2
(1-u)^{a_i} (1+u)^{b_i} d u\\\\
&= \\frac{2^{a_i+b_i+1}}
{2k+a_i+b_i+1}
\\frac{\\Gamma(k+a_i+1)\\Gamma(k+b_i+1)}
{\\Gamma(k+a_i+b_i+1)n!}
:param jacobi_params:
Jacobi parameters :math:`[(a_i, b_i)]_{i=1}^d \\in [-\\frac{1}{2}, \\frac{1}{2}]^{d \\times 2}`
The number of rows :math:`d` prescribes the ambient dimension of the points i.e. :math:`x_{1}, \\dots, x_{N} \\in [-1, 1]^d`
:type jacobi_params:
array_like
:param deg_max:
Maximal degree of 1D Jacobi polynomials
:type deg_max:
int
:return:
Array of size ``deg_max + 1`` :math:`\\times d` with entry :math:`k,i` given by :math:`\\|P_{k}^{(a_i,b_i)}\\|`
:rtype:
array_like
.. seealso::
- `Wikipedia Jacobi polynomials <http://en.wikipedia.org/wiki/Jacobi_polynomials#Orthogonality>`_
- :py:meth:`compute_ordering`
"""
# Initialize
# - [square_norms]_ij = ||P_i^{a_j, b_j}||^2
dim = jacobi_params.size // 2
square_norms = np.zeros((deg_max + 1, dim))
n = np.arange(1, deg_max + 1)[:, None]
arcsine = np.all(jacobi_params == -0.5, axis=1)
if any(arcsine):
# |P_0|^2 = pi
# |P_n|^2 = 1/2 (Gamma(n+1/2)/n!)^2 otherwise
square_norms[0, arcsine] = np.pi
square_norms[1:, arcsine] =\
0.5 * np.exp(2 * (gammaln(n + 0.5) - gammaln(n + 1)))
# 0.5 * (gamma(n + 0.5) / factorial(n))**2
non_arcsine = np.any(jacobi_params != -0.5, axis=1)
if any(non_arcsine):
# |P_n|^2 =
# 2^(a + b + 1) Gamma(n + 1 + a) Gamma(n + 1 + b)
# n! (2n + a + b + 1) Gamma(n + 1 + a + b)
a = jacobi_params[non_arcsine, 0]
b = jacobi_params[non_arcsine, 1]
square_norms[0, non_arcsine] = 2**(a + b + 1) * beta(a + 1, b + 1)
square_norms[1:, non_arcsine] = np.exp((a + b + 1) * np.log(2)
+ gammaln(n + 1 + a)
+ gammaln(n + 1 + b)
- gammaln(n + 1)
- np.log(2 * n + 1 + a + b)
- gammaln(n + 1 + a + b))
return np.sqrt(square_norms)
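# --- Hedged illustration (added; not in the original module) ---
# In the arcsine case a = b = -1/2 the squared norm of P_0 is pi, so the
# (non-squared) norm returned for degree 0 is sqrt(pi).
def _demo_compute_norms_1D_polynomials():
    norms = compute_norms_1D_polynomials(np.array([[-0.5, -0.5]]), deg_max=3)
    assert np.isclose(norms[0, 0], np.sqrt(np.pi))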
def compute_rejection_bounds(jacobi_params, ordering, log_scale=True):
""" Compute the rejection constants for the acceptance/rejection mechanism used in :py:meth:`sample_chain_rule_proposal` to sample
.. math::
\\frac{1}{N} K(x, x) w(x) dx
= \\frac{1}{N}
\\sum_{\\mathfrak{b}(k)=0}^{N-1}
\\left( \\frac{P_k(x)}{\\left\\| P_k \\right\\|} \\right)^2
w(x)
with proposal distribution
.. math::
w_{eq}(x) d x
= \\prod_{i=1}^{d} \\frac{1}{\\pi\\sqrt{1-(x_i)^2}} d x_i
To get a sample:
1. Draw a multi-index :math:`k` uniformly at random in :math:`\\left\\{ \\mathfrak{b}^{-1}(0), \\dots, \\mathfrak{b}^{-1}(N-1) \\right\\}`
2. Sample from :math:`P_k(x)^2 w(x) dx` with proposal :math:`w_{eq}(x) d x`.
The acceptance ratio writes
.. math::
\\frac{\\left( \\frac{P_k(x)}{\\left\\| P_k \\right\\|} \\right)^2
w(x)}
{w_{eq}(x)}
= \\prod_{i=1}^{d}
\\pi
\\left(
\\frac{P_{k_i}^{(a_i, b_i)}(x)}
{\\left\\| P_{k_i}^{(a_i, b_i)} \\right\\|}
\\right)^2
(1-x_i)^{a_i+\\frac{1}{2}}
(1+x_i)^{b_i+\\frac{1}{2}}
\\leq C_k
- For :math:`k_i>0` we use a result on Jacobi polynomials given by, e.g., :cite:`Gau09`, for :math:`\\quad|a|,|b| \\leq \\frac{1}{2}`
.. math::
&
\\pi
(1-u)^{a+\\frac{1}{2}}
(1+u)^{b+\\frac{1}{2}}
\\left(
\\frac{P_{n}^{(a, b)}(u)}
{\\left\\| P_{n}^{(a, b)} \\right\\|}
\\right)^2\\\\
&\\leq
\\frac{2}
{n!(n+(a+b+1) / 2)^{2 \\max(a,b)}}
\\frac{\\Gamma(n+a+b+1)
\\Gamma(n+\\max(a,b)+1)}
{\\Gamma(n+\\min(a,b)+1)}
- For :math:`k_i=0`, we use less involved properties of the `Jacobi polynomials <https://en.wikipedia.org/wiki/Jacobi_polynomials>`_:
- :math:`P_{0}^{(a, b)} = 1`
- :math:`\\|P_{0}^{(a, b)}\\|^2 = 2^{a+b+1} \\operatorname{B}(a+1,b+1)`
- :math:`m = \\frac{b-a}{a+b+1}` is the mode of :math:`(1-u)^{a+\\frac{1}{2}} (1+u)^{b+\\frac{1}{2}}` (valid since :math:`a+\\frac{1}{2}, b+\\frac{1}{2} > 0`)
So that,
.. math::
\\pi
(1-u)^{a+\\frac{1}{2}}
(1+u)^{b+\\frac{1}{2}}
\\left(\\frac{P_{0}^{(a, b)}(u)}
{\\|P_{0}^{(a, b)}\\|}\\right)^{2}
&=
\\frac
{\\pi
(1-u)^{a+\\frac{1}{2}}
(1+u)^{b+\\frac{1}{2}}}
{\\|P_{0}^{(a, b)}\\|^2} \\\\
&\\leq
\\frac
{\\pi
(1-m)^{a+\\frac{1}{2}}
(1+m)^{b+\\frac{1}{2}}}
{2^{a+b+1} \\operatorname{B}(a+1,b+1)}
:param jacobi_params:
Jacobi parameters :math:`[(a_i, b_i)]_{i=1}^d \\in [-\\frac{1}{2}, \\frac{1}{2}]^{d \\times 2}`.
The number of rows :math:`d` prescribes the ambient dimension of the points i.e. :math:`x_{1}, \\dots, x_{N} \\in [-1, 1]^d`
:type jacobi_params:
array_like
:param ordering:
Ordering of the multi-indices :math:`\\in\\mathbb{N}^d` defining the order between the multivariate monomials (see also :py:meth:`compute_ordering`)
- the number of rows corresponds to the number :math:`N` of monomials considered.
- the number of columns :math:`=d`
:type ordering:
array_like
:param log_scale:
If True, the rejection bound is computed using the logarithmic versions ``betaln``, ``gammaln`` of ``beta`` and ``gamma`` functions to avoid overflows
:type log_scale:
bool
:return:
The rejection bounds :math:`C_{k}` for :math:`k = \\mathfrak{b}^{-1}(0), \\dots, \\mathfrak{b}^{-1}(N-1)`
:rtype:
array_like
.. seealso::
- :cite:`Gau09` for the domination when :math:`k_i > 0`
- :py:meth:`compute_norms_1D_polynomials`
"""
# Initialize [bounds]_ij on
# pi (1-x)^(a_j+1/2) (1+x)^(b_j+1/2) P_i^2/||P_i||^2
deg_max, dim = np.max(ordering), jacobi_params.size // 2
bounds = np.zeros((deg_max + 1, dim))
arcsine = np.all(jacobi_params == -0.5, axis=1)
if any(arcsine):
bounds[0, arcsine] = 0.0 if log_scale else 1.0
bounds[1:, arcsine] = np.log(2.0) if log_scale else 2.0
non_arcsine = np.any(jacobi_params != -0.5, axis=1)
if any(non_arcsine):
# bounds[non_arcsine, 0]
# = pi * (1-mode)^(a+1/2) (1+mode)^(b+1/2) * 1 / ||P_0||^2
# where mode = argmax (1-x)^(a+1/2) (1+x)^(b+1/2) = (b-a)/(a+b+1)
a = jacobi_params[non_arcsine, 0]
b = jacobi_params[non_arcsine, 1]
mode = (b - a) / (a + b + 1)
if log_scale:
log_square_norm_P_0 =\
(a + b + 1) * np.log(2) + betaln(a + 1, b + 1)
bounds[0, non_arcsine] =\
np.log(np.pi)\
+ (0.5 + a) * np.log(1 - mode)\
+ (0.5 + b) * np.log(1 + mode)\
- log_square_norm_P_0
else:
square_norm_P_0 = 2**(a + b + 1) * beta(a + 1, b + 1)
bounds[0, non_arcsine] =\
np.pi\
* (1 - mode)**(0.5 + a)\
* (1 + mode)**(0.5 + b)\
/ square_norm_P_0
# bounds[1:, non_arcsine] =
# 2 * Gamma(n + 1 + a + b) Gamma(n + 1 + max(a,b))
# n! * (n+(a+b+1)/2)^(2 * max(a,b)) * Gamma(n + 1 + min(a,b))
min_a_b = np.minimum(a, b)
max_a_b = np.maximum(a, b)
n = np.arange(1, deg_max + 1)[:, None]
if log_scale:
bounds[1:, non_arcsine] =\
np.log(2)\
+ gammaln(n + 1 + a + b)\
+ gammaln(n + 1 + max_a_b)\
- gammaln(n + 1)\
- 2 * max_a_b * np.log(n + 0.5 * (a + b + 1))\
- gammaln(n + 1 + min_a_b)
else:
bounds[1:, non_arcsine] =\
2\
* gamma(n + 1 + a + b)\
* gamma(n + 1 + max_a_b)\
/ factorial(n)\
/ (n + 0.5 * (a + b + 1))**(2 * max_a_b)\
/ gamma(n + 1 + min_a_b)
if log_scale:
return np.exp(np.sum(bounds[ordering, range(dim)], axis=1))
else:
return np.prod(bounds[ordering, range(dim)], axis=1)
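# --- Hedged illustration (added; not in the original module) ---
# For d=1 arcsine parameters the bound is 1 for k=0 and 2 for k>0, matching
# the values hard-coded in the arcsine branch above.
def _demo_compute_rejection_bounds():
    ordering = compute_ordering(3, 1)  # [(0,), (1,), (2,)]
    bounds = compute_rejection_bounds(np.array([[-0.5, -0.5]]), ordering)
    assert np.allclose(bounds, [1., 2., 2.])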
def compute_degrees_1D_polynomials(max_degrees):
""" deg[i, j] = i if i <= max_degrees[j] else 0
"""
max_deg, dim = max(max_degrees), len(max_degrees)
degrees = np.tile(np.arange(max_deg + 1)[:, None], (1, dim))
degrees[degrees > max_degrees] = 0
return max_deg, degrees
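# --- Hedged usage sketch (added; not in the original module) ---
# Builds a small 2D multivariate Jacobi OPE, draws one DPP sample with the
# chain rule and evaluates the projection kernel on its own points. N and the
# Jacobi parameters are illustrative; running this requires the dppy package
# (for check_random_state and inner1d) to be installed.
if __name__ == '__main__':
    N_demo, d_demo = 20, 2
    jac_params = np.zeros((d_demo, 2))         # a_i = b_i = 0, within [-1/2, 1/2]
    dpp = MultivariateJacobiOPE(N_demo, jac_params)
    X_demo = dpp.sample(random_state=0)        # (N, d) points in [-1, 1]^d
    print(dpp.K(X_demo, eval_pointwise=True))  # K(x, x) for each sampled point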
| mit |
hackforwesternmass/hunger-story | create_world_bank.py | 1 | 3111 | ##
## Reshape World Bank World Development Indicators for data analysis
##
import pandas as pd
#grab all indicators from 1990 and on
all = pd.read_excel('data/world_development_indicators_download.xlsx', 'Data',
usecols = [0,1,2,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,
49,50,51,52,53,54,55,56,57])
all = all.rename(columns = {
'Country Name':'country',
'Country Code':'country_code',
'Indicator Name':'indicator',
})
#create dataframe that's a subset of the indicators we want
indicator_list = [
'Prevalence of undernourishment (% of population)',
'Crop production index (2004-2006 = 100)',
'Agricultural land (% of land area)',
'Food production index (2004-2006 = 100)',
'Food exports (% of merchandise exports)',
'Food imports (% of merchandise imports)',
'Land area (sq. km)',
'Population (Total)',
'Unemployment, female (% of female labor force) (modeled ILO estimate)',
'Unemployment, male (% of male labor force) (modeled ILO estimate)',
'Unemployment, total (% of total labor force) (modeled ILO estimate)',
'GDP (constant 2005 US$)',
'GDP per capita (constant 2005 US$)',
'Adjusted net national income (constant 2005 US$)',
]
fs = all[all.indicator.isin(indicator_list)]
#create three DataFrames: individual countries,
#continent/regional aggregates, world aggregate
#(see "Country" tab of the original indicators downloaded .xlsx
#country_code details)
world = fs[fs.country_code.isin(['WLD'])]
#continent/regional aggregates
agg_list = ['ARB', 'CSS', 'EAS', 'EMU', 'LCN', 'MEA', 'PSS', 'SAS', 'SSF']
agg = fs[fs.country_code.isin(agg_list)]
#individual countries - remove country names that represent aggregates
agg_country_code_list = [
'ARB', 'CSS', 'EAS', 'EAP', 'CEA', 'EMU', 'ECS', 'ECA', 'CEU', 'EUU',
'HPC', 'HIC', 'NOC', 'OEC', 'LCN', 'LAC', 'CLA', 'LDC', 'LMY', 'LIC',
'LMC', 'MEA', 'MNA', 'CME', 'MIC', 'NAC', 'OED', 'OSS', 'PSS', 'SST',
'SAS', 'CSA', 'SSF', 'SSA', 'CAA', 'UMC', 'WLD'
]
fs = fs[~fs.country_code.isin(agg_country_code_list)]
#reshape to put years in rows instead of columns
fs = pd.melt(fs, id_vars=['country', 'country_code', 'indicator'], var_name = 'year')
agg = pd.melt(agg, id_vars=['country', 'country_code', 'indicator'], var_name = 'year')
world = pd.melt(world, id_vars=['country', 'country_code', 'indicator'], var_name = 'year')
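#(after melting, each DataFrame is in long format: one row per
# country/indicator/year with a single 'value' column)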
#reshape again to put indicators in columns instead of rows & save results
fs = pd.pivot_table(fs, values='value', index=['country', 'country_code', 'year'], columns = ['indicator'])
agg = pd.pivot_table(agg, values='value', index=['country', 'country_code', 'year'], columns = ['indicator'])
world = pd.pivot_table(world, values='value', index=['country', 'country_code', 'year'], columns = ['indicator'])
#write files
fs.to_csv('data/world_development_indicators.csv', cols=indicator_list)
agg.to_csv('data/world_development_indicators_agg.csv', cols=indicator_list)
world.to_csv('data/world_development_indicators_world.csv', cols=indicator_list)
| mit |
jefflyn/buddha | src/mlia/Ch03/treePlotter.py | 3 | 3824 | '''
Created on Oct 14, 2010
@author: Peter Harrington
'''
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def getNumLeafs(myTree):
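    # Recursively count the leaf nodes of a tree stored as nested dictionaries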
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
numLeafs += getNumLeafs(secondDict[key])
else: numLeafs +=1
return numLeafs
def getTreeDepth(myTree):
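    # Recursively compute the maximum depth of a tree stored as nested dictionaries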
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
thisDepth = 1 + getTreeDepth(secondDict[key])
else: thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):#if the first key tells you what feat was split on
numLeafs = getNumLeafs(myTree) #this determines the x width of this tree
depth = getTreeDepth(myTree)
firstStr = myTree.keys()[0] #the text label for this node should be this
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
plotTree(secondDict[key],cntrPt,str(key)) #recursion
else: #it's a leaf node print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
#if you do get a dictionary you know it's a tree, and the first element will be another dict
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
    #createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
#def createPlot():
# fig = plt.figure(1, facecolor='white')
# fig.clf()
#    createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
# plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
# plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
# plt.show()
def retrieveTree(i):
listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
#createPlot(thisTree) | artistic-2.0 |
osvaldshpengler/BuildingMachineLearningSystemsWithPython | ch02/stump.py | 24 | 1604 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from sklearn.datasets import load_iris
data = load_iris()
features = data.data
labels = data.target_names[data.target]
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
is_virginica = (labels == 'virginica')
# Initialize to a value that is worse than any possible test
best_acc = -1.0
# Loop over all the features
for fi in range(features.shape[1]):
# Test every possible threshold value for feature fi
thresh = features[:, fi].copy()
# Test them in order
thresh.sort()
for t in thresh:
# Generate predictions using t as a threshold
pred = (features[:, fi] > t)
# Accuracy is the fraction of predictions that match reality
acc = (pred == is_virginica).mean()
# We test whether negating the test is a better threshold:
acc_neg = ((~pred) == is_virginica).mean()
if acc_neg > acc:
acc = acc_neg
negated = True
else:
negated = False
# If this is better than previous best, then this is now the new best:
if acc > best_acc:
best_acc = acc
best_fi = fi
best_t = t
best_is_negated = negated
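# After the exhaustive search, report the best single feature/threshold decision stump found.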
print('Best threshold is {0} on feature {1} (index {2}), which achieves accuracy of {3:.1%}.'.format(
best_t, data.feature_names[best_fi], best_fi, best_acc))
| mit |
ahellander/pyurdme | examples/coral_reef/coral.py | 5 | 7211 | #!/usr/bin/env python
import math
import matplotlib.pyplot as plt
import numpy
import pyurdme
class CoralReef(pyurdme.URDMEModel):
""" Model developed by Briggs and Drawert 3/31/2014, based on a
non-spatial model by Briggs and Adam.
"""
def __init__(self, name="coral_reef", D_c=1.0, D_m=1.0, version=1):
pyurdme.URDMEModel.__init__(self, name)
# Species
Coral = pyurdme.Species(name="Coral",diffusion_constant=0.0)
Coral_m = pyurdme.Species(name="Coral_m",diffusion_constant=D_c)
MA = pyurdme.Species(name="MA", diffusion_constant=0.0)
MA_m = pyurdme.Species(name="MA_m", diffusion_constant=D_m)
Turf = pyurdme.Species(name="Turf", diffusion_constant=0.0)
self.add_species([Coral, MA, Coral_m, MA_m, Turf])
# Parameters
phi_c = pyurdme.Parameter(name="phi_c", expression=0.0011) #1/year
phi_m = pyurdme.Parameter(name="phi_m", expression=0.001) #1/year
g_tc = pyurdme.Parameter(name="g_tc", expression=0.1) #1/year
g_tm = pyurdme.Parameter(name="g_tm", expression=0.2) #1/year
Gamma = pyurdme.Parameter(name="Gamma", expression=0.05)
dc = pyurdme.Parameter(name="dc", expression=0.05) #1/year
dm = pyurdme.Parameter(name="dm", expression=1.0) #1/year
#dm = pyurdme.Parameter(name="dm", expression=0.2) #1/year
phi_g = pyurdme.Parameter(name="psi_g", expression=0.0)
        # Death rate of mobile propagules. Combine with diffusion to determine spread.
mu_c = pyurdme.Parameter(name="mu_c", expression=1.0) #1/year
mu_m = pyurdme.Parameter(name="mu_m", expression=1.0) #1/year
        # mobile propagules destroyed by established colonies
alpha_c = pyurdme.Parameter(name="alpha_c", expression=0.1) #1/year
alpha_m = pyurdme.Parameter(name="alpha_m", expression=0.5) #1/year
        # Production of mobile propagules
R_c = pyurdme.Parameter(name="R_c", expression=1.0) #1/year
R_m = pyurdme.Parameter(name="R_m", expression=1.0) #1/year
self.add_parameter([phi_c, phi_m, g_tc, g_tm, Gamma, dc, dm, phi_g, mu_c, mu_m, alpha_c, alpha_m, R_c, R_m])
# Reactions:
# C -> T : dc
self.add_reaction(pyurdme.Reaction(name="R3", reactants={Coral:1}, products={Turf:1}, rate=dc))
# MA -> T : dm
self.add_reaction(pyurdme.Reaction(name="R4", reactants={MA:1}, products={Turf:1}, rate=dm))
# T + C_m -> C : phi_c
self.add_reaction(pyurdme.Reaction(name="R5", reactants={Turf:1, Coral_m:1}, products={Coral:1}, rate=phi_c))
# T + MA_m -> MA : phi_m
self.add_reaction(pyurdme.Reaction(name="R6", reactants={Turf:1, MA_m:1}, products={MA:1}, rate=phi_m))
# C + T -> 2C : g_tc * exp(-1.0 * psi_g * MA / 100)
self.add_reaction(pyurdme.Reaction(name="R7", reactants={Turf:1, Coral:1}, products={Coral:2}, propensity_function="g_tc*Turf*Coral*exp(-1.0 * psi_g * MA / Space_per_voxel)/vol"))
# MA + T -> 2MA : g_tm
self.add_reaction(pyurdme.Reaction(name="R8", reactants={Turf:1, MA:1}, products={MA:2}, rate=g_tm))
# C + MA -> 2MA : Gamma * g_tm
self.add_reaction(pyurdme.Reaction(name="R9", reactants={Coral:1, MA:1}, products={MA:2}, propensity_function="g_tm*Gamma*Coral*MA/vol"))
# C -> C + C_m : R_c
self.add_reaction(pyurdme.Reaction(name="R10", reactants={Coral:1}, products={Coral:1, Coral_m:1}, rate=R_c))
# MA -> MA + MA_m : R_m
self.add_reaction(pyurdme.Reaction(name="R11", reactants={MA:1}, products={MA:1, MA_m:1}, rate=R_m))
# C_m -> 0 : mu_c
self.add_reaction(pyurdme.Reaction(name="R12", reactants={Coral_m:1}, products={}, rate=mu_c))
# MA_m -> 0 : mu_m
self.add_reaction(pyurdme.Reaction(name="R13", reactants={MA_m:1}, products={}, rate=mu_m))
# MA + C_m -> MA : alpha_c
self.add_reaction(pyurdme.Reaction(name="R14", reactants={MA:1, Coral_m:1}, products={MA:1}, rate=alpha_c))
# C + MA_m -> C : alpha_m
self.add_reaction(pyurdme.Reaction(name="R15", reactants={Coral:1, MA_m:1}, products={Coral:1}, rate=alpha_m))
# A unit square
# each grid point is 10cm x 10cm, domain is 5m x 5m
self.mesh = pyurdme.URDMEMesh.generate_square_mesh(L=5, nx=50, ny=50, periodic=True)
Space_per_voxel = 10
self.add_parameter(pyurdme.Parameter(name="Space_per_voxel", expression=Space_per_voxel)) #1/year
if True:
            # Start with two colonies
self.set_initial_condition_distribute_uniformly({Turf:Space_per_voxel})
self.set_initial_condition_place_near({Coral:Space_per_voxel}, point=[1,1])
self.set_initial_condition_place_near({Turf:0}, point=[1,1])
self.set_initial_condition_place_near({MA:Space_per_voxel}, point=[4,4])
self.set_initial_condition_place_near({Turf:0}, point=[4,4])
else:
# Every voxel is the same
self.set_initial_condition_distribute_uniformly({Turf:0})
self.set_initial_condition_distribute_uniformly({Coral:Space_per_voxel-1})
self.set_initial_condition_distribute_uniformly({MA:1})
for vndx in range(self.u0.shape[1]):
tot = 0
for sndx, sname in enumerate(self.listOfSpecies):
tot += self.u0[sndx][vndx]
if tot > 100:
for sndx, sname in enumerate(self.listOfSpecies):
print "u0[{0}][{1}] = {2}".format(sname, vndx, self.u0[sndx][vndx])
#self.timespan(numpy.linspace(0,500,501)) #500 years
#self.timespan(numpy.linspace(0,5,72)) #5 years, by months
        self.timespan(numpy.linspace(0,11,66)) #11 years, by 2 months
if __name__ == "__main__":
model = CoralReef()
result = model.run(report_level=1)
print "Writing PavaView compatable output to 'output_coral' directory"
result.export_to_vtk(species='Coral',folder_name="output_coral")
x_vals = model.mesh.coordinates()[:, 0]
y_vals = model.mesh.coordinates()[:, 1]
C_vals = result.get_species("Coral")
MA_vals = result.get_species("MA")
Turf_vals = result.get_species("Turf")
num_vox = len(x_vals)
plt.figure(figsize=(12,6), dpi=100)
tndx = -1 #show end timepoint
tval = model.tspan[tndx]
plt.subplot(1,3,1)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=C_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('Coral population')
plt.title('t={0}'.format(tval))
plt.subplot(1,3,2)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=MA_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('MA population')
plt.title('t={0}'.format(tval))
plt.subplot(1,3,3)
heatmap, xedges, yedges = numpy.histogram2d(x=x_vals, y=y_vals, weights=Turf_vals[tndx,:], bins=int(math.sqrt(num_vox)))
plt.imshow(heatmap)
cb = plt.colorbar()
cb.set_label('Free Turf')
plt.title('t={0}'.format(tval))
plt.show()
| gpl-3.0 |
mortada/fredapi | fredapi/fred.py | 1 | 19144 |
import os
import sys
import xml.etree.ElementTree as ET
if sys.version_info[0] >= 3:
import urllib.request as url_request
import urllib.parse as url_parse
import urllib.error as url_error
else:
import urllib2 as url_request
import urllib as url_parse
import urllib2 as url_error
import pandas as pd
urlopen = url_request.urlopen
quote_plus = url_parse.quote_plus
urlencode = url_parse.urlencode
HTTPError = url_error.HTTPError
class Fred(object):
earliest_realtime_start = '1776-07-04'
latest_realtime_end = '9999-12-31'
nan_char = '.'
max_results_per_request = 1000
root_url = 'https://api.stlouisfed.org/fred'
def __init__(self,
api_key=None,
api_key_file=None):
"""
Initialize the Fred class that provides useful functions to query the Fred dataset. You need to specify a valid
API key in one of 3 ways: pass the string via api_key, or set api_key_file to a file with the api key in the
first line, or set the environment variable 'FRED_API_KEY' to the value of your api key. You can sign up for a
free api key on the Fred website at http://research.stlouisfed.org/fred2/
"""
self.api_key = None
if api_key is not None:
self.api_key = api_key
elif api_key_file is not None:
f = open(api_key_file, 'r')
self.api_key = f.readline().strip()
f.close()
else:
self.api_key = os.environ.get('FRED_API_KEY')
if self.api_key is None:
import textwrap
raise ValueError(textwrap.dedent("""\
You need to set a valid API key. You can set it in 3 ways:
pass the string with api_key, or set api_key_file to a
file with the api key in the first line, or set the
environment variable 'FRED_API_KEY' to the value of your
api key. You can sign up for a free api key on the Fred
website at http://research.stlouisfed.org/fred2/"""))
def __fetch_data(self, url):
"""
helper function for fetching data given a request URL
"""
url += '&api_key=' + self.api_key
try:
response = urlopen(url)
root = ET.fromstring(response.read())
except HTTPError as exc:
root = ET.fromstring(exc.read())
raise ValueError(root.get('message'))
return root
def _parse(self, date_str, format='%Y-%m-%d'):
"""
helper function for parsing FRED date string into datetime
"""
rv = pd.to_datetime(date_str, format=format)
if hasattr(rv, 'to_pydatetime'):
rv = rv.to_pydatetime()
return rv
def get_series_info(self, series_id):
"""
Get information about a series such as its title, frequency, observation start/end dates, units, notes, etc.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
info : Series
a pandas Series containing information about the Fred series
"""
url = "%s/series?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None or not len(root):
raise ValueError('No info exists for series id: ' + series_id)
info = pd.Series(list(root)[0].attrib)
return info
def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):
"""
Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series_latest_release()
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
observation_start : datetime or datetime-like str such as '7/1/2014', optional
earliest observation date
observation_end : datetime or datetime-like str such as '7/1/2014', optional
latest observation date
kwargs : additional parameters
Any additional parameters supported by FRED. You can see https://api.stlouisfed.org/docs/fred/series_observations.html for the full list
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
url = "%s/series/observations?series_id=%s" % (self.root_url, series_id)
if observation_start is not None:
observation_start = pd.to_datetime(observation_start,
errors='raise')
url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')
if observation_end is not None:
observation_end = pd.to_datetime(observation_end, errors='raise')
url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')
if kwargs.keys():
url += '&' + urlencode(kwargs)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
for child in root:
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
data[self._parse(child.get('date'))] = val
return pd.Series(data)
def get_series_latest_release(self, series_id):
"""
Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series()
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
info : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
return self.get_series(series_id)
def get_series_first_release(self, series_id):
"""
Get first-release data for a Fred series id. This ignores any revision to the data series. For instance,
The US GDP for Q1 2014 was first released to be 17149.6, and then later revised to 17101.3, and 17016.0.
This will ignore revisions after the first release.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
df = self.get_series_all_releases(series_id)
first_release = df.groupby('date').head(1)
data = first_release.set_index('date')['value']
return data
def get_series_as_of_date(self, series_id, as_of_date):
"""
Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series
before or on as_of_date, but ignores any revision on dates after as_of_date.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
as_of_date : datetime, or datetime-like str such as '10/25/2014'
Include data revisions on or before this date, and ignore revisions afterwards
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
as_of_date = pd.to_datetime(as_of_date)
df = self.get_series_all_releases(series_id)
data = df[df['realtime_start'] <= as_of_date]
return data
def get_series_all_releases(self, series_id):
"""
Get all data for a Fred series id including first releases and all revisions. This returns a DataFrame
with three columns: 'date', 'realtime_start', and 'value'. For instance, the US GDP for Q4 2013 was first released
to be 17102.5 on 2014-01-30, and then revised to 17080.7 on 2014-02-28, and then revised to 17089.6 on
2014-03-27. You will therefore get three rows with the same 'date' (observation date) of 2013-10-01 but three
different 'realtime_start' of 2014-01-30, 2014-02-28, and 2014-03-27 with corresponding 'value' of 17102.5, 17080.7
and 17089.6
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : DataFrame
a DataFrame with columns 'date', 'realtime_start' and 'value' where 'date' is the observation period and 'realtime_start'
is when the corresponding value (either first release or revision) is reported.
"""
url = "%s/series/observations?series_id=%s&realtime_start=%s&realtime_end=%s" % (self.root_url,
series_id,
self.earliest_realtime_start,
self.latest_realtime_end)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
i = 0
for child in root:
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
realtime_start = self._parse(child.get('realtime_start'))
# realtime_end = self._parse(child.get('realtime_end'))
date = self._parse(child.get('date'))
data[i] = {'realtime_start': realtime_start,
# 'realtime_end': realtime_end,
'date': date,
'value': val}
i += 1
data = pd.DataFrame(data).T
return data
def get_series_vintage_dates(self, series_id):
"""
Get a list of vintage dates for a series. Vintage dates are the dates in history when a
series' data values were revised or new data values were released.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
dates : list
list of vintage dates
"""
url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No vintage date exists for series id: ' + series_id)
dates = []
for child in root:
dates.append(self._parse(child.text))
return dates
def __do_series_search(self, url):
"""
helper function for making one HTTP request for data, and parsing the returned results into a DataFrame
"""
root = self.__fetch_data(url)
series_ids = []
data = {}
num_results_returned = 0 # number of results returned in this HTTP request
num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned
for child in root:
num_results_returned += 1
series_id = child.get('id')
series_ids.append(series_id)
data[series_id] = {"id": series_id}
fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
"frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
"seasonal_adjustment_short", "last_updated", "popularity", "notes"]
for field in fields:
data[series_id][field] = child.get(field)
if num_results_returned > 0:
data = pd.DataFrame(data, columns=series_ids).T
# parse datetime columns
for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
data[field] = data[field].apply(self._parse, format=None)
# set index name
data.index.name = 'series id'
else:
data = None
return data, num_results_total
def __get_search_results(self, url, limit, order_by, sort_order, filter):
"""
helper function for getting search results up to specified limit on the number of results. The Fred HTTP API
truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
"""
order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
'observation_start', 'observation_end', 'popularity']
if order_by is not None:
if order_by in order_by_options:
url = url + '&order_by=' + order_by
else:
raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
if filter is not None:
if len(filter) == 2:
url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
else:
raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)')
sort_order_options = ['asc', 'desc']
if sort_order is not None:
if sort_order in sort_order_options:
url = url + '&sort_order=' + sort_order
else:
raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
data, num_results_total = self.__do_series_search(url)
if data is None:
return data
if limit == 0:
max_results_needed = num_results_total
else:
max_results_needed = limit
if max_results_needed > self.max_results_per_request:
for i in range(1, max_results_needed // self.max_results_per_request + 1):
offset = i * self.max_results_per_request
next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
data = data.append(next_data)
return data.head(max_results_needed)
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
"""
Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.
Parameters
----------
text : str
text to do fulltext search on, e.g., 'Real GDP'
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/series/search?search_text=%s&" % (self.root_url,
quote_plus(text))
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
return info
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a release id. Returns information about matching series in a DataFrame.
Parameters
----------
release_id : int
release id, e.g., 151
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for release id: ' + str(release_id))
return info
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
Search for series that belongs to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
limit the number of results to this value. If limit is 0, it means fetching all results without limit.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
"""
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info
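# Example usage (sketch; assumes a valid API key is available, e.g. via the
# FRED_API_KEY environment variable):
#   fred = Fred()
#   cpi = fred.get_series('CPIAUCSL', observation_start='2010-01-01')
#   gdp_releases = fred.get_series_all_releases('GDP')
#   matches = fred.search('Real GDP', limit=10)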
| apache-2.0 |
choldgraf/download | setup.py | 1 | 1959 | #! /usr/bin/env python
#
# Copyright (C) 2015 Chris Holdgraf
# <[email protected]>
#
# Adapted from MNE-Python
import os
from setuptools import setup
descr = """A quick module to help downloading files using python."""
with open("./download/__init__.py", "r") as ff:
lines = ff.readlines()
for line in lines:
if line.startswith("__version__"):
__version__ = line.split("= ")[-1].strip().strip('"')
break
DISTNAME = "download"
DESCRIPTION = descr
MAINTAINER = "Chris Holdgraf"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://github.com/choldgraf/download"
LICENSE = "BSD (3-clause)"
DOWNLOAD_URL = "https://github.com/choldgraf/download"
with open("./README.rst", "r") as ff:
LONG_DESCRIPTION = ff.read()
if __name__ == "__main__":
if os.path.exists("MANIFEST"):
os.remove("MANIFEST")
setup(
name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=False,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/x-rst",
license=LICENSE,
url=URL,
version=__version__,
download_url=DOWNLOAD_URL,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
],
platforms="any",
packages=["download"],
package_data={},
scripts=[],
install_requires=["tqdm", "six", "requests"],
extras_require={
"dev": ["numpy", "codecov", "pytest", "pytest-cov"],
"sphinx": ["matplotlib", "pandas", "sphinx", "sphinx-gallery", "pillow"],
},
)
| mit |
rolandwz/pymisc | ustrader/main.py | 1 | 4589 | # -*- coding: utf-8 -*-
import datetime, time, csv, os, shutil
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import MultiCursor
from utils.db import SqliteDB
from utils.rwlogging import log
from trader import Trader
import dataloader
from indicator import ma, macd, bolling, rsi, kdj
from strategy import maTrader, bollingTrader, macdTrader, rsiTrader, kdjTrader
from strategy import n2Trader, n3Trader, s3Trader, s4Trader, s5Trader, s6Trader
from strategy import l5Trader
from mas import maStrategy, fmaStrategy
path = os.path.dirname(__file__)
def calculateIndicators(table):
#tables = ['XAGUSD30', 'XAGUSD60', 'XAGUSD240', 'XAGUSD1440', 'XAGUSD10080', 'XAGUSD43200', ]
#for table in tables:
h1ma5 = ma.calc_all_ma(table, 'LWMA', 5)
#return
bollings = bolling.calc_all_bolling(table)
macds = macd.calc_all_macd(table)
rsis = rsi.calc_all_rsi(table)
kdjs = kdj.calc_all_kdj(table)
#return
h1ma5 = ma.calc_all_ma(table, 'MA', 5)
h1ma10 = ma.calc_all_ma(table, 'MA', 10)
h1ma20 = ma.calc_all_ma(table, 'MA', 20)
h1ema5 = ma.calc_all_ma(table, 'EMA', 5)
h1ema10 = ma.calc_all_ma(table, 'EMA', 10)
h1ema20 = ma.calc_all_ma(table, 'EMA', 20)
#return
h1sma5 = ma.calc_all_ma(table, 'SMA', 5)
h1sma10 = ma.calc_all_ma(table, 'SMA', 10)
h1sma20 = ma.calc_all_ma(table, 'SMA', 20)
def drawStats(prices):
for i in range(5, 51):
drawStat(prices, i)
def drawStat(prices, period):
l = len(prices)
ps = [0] * l
pdts = [0] * l
std = [0] * l
stdper = [0] * l
diff = [0] * l
dmean = [0] * l
days = 0
for i in range(l):
pdts[i] = prices[i]['dt']
ps[i] = prices[i]['close']
diff[i] = prices[i]['high'] - prices[i]['low']
if i < period - 1: continue
std[i] = round(np.std(ps[i-period+1 : i+1], dtype=np.float64, ddof=0), 3)
stdper[i] = round(std[i] / np.mean(ps[i-period+1 : i+1]), 3)
dmean[i] = round(np.mean(diff[i-period+1 : i+1]), 3)
if (std[i-1] < 1 and std[i] >= 1) or (std[i-1] > 1 and std[i] <= 1):
dtstr = prices[i]['dt'].strftime('%Y-%m-%d')
log.info(dtstr + ', std change to ' + str(std[i]) + ', days: ' + str(days))
days = 0
days += 1
macds = macd.calc_macd(prices, 12, 26, 9)
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax1.set_ylabel('Price')
ax1.grid()
ax1.plot_date(pdts, ps, color='b', linestyle='-', marker='', label='Equity')
ax2 = fig.add_subplot(312)
ax2.set_ylabel('Std')
ax2.grid()
ax2.plot_date(pdts, std, color='b', linestyle='-', marker='', label='Equity')
ax3 = fig.add_subplot(313)
ax3.set_ylabel('MACD')
ax3.grid()
ax3.plot_date(pdts, stdper, color='b', linestyle='-', marker='', label='Equity')
#multi = MultiCursor(fig.canvas, (ax1, ax2, ax3), color='r', lw=1, horizOn=False, vertOn=True)
#plt.show()
#return
fname = str(period)
plt.savefig(os.path.join(os.path.dirname(__file__), 'result/' + fname + '.png'), dpi=150)
plt.close(fig)
return
def clearLog():
logdir = os.path.join(path, 'logs')
rsdir = os.path.join(path, 'result')
rslist = os.listdir(rsdir)
for f in rslist:
fp = os.path.join(rsdir, f)
if os.path.isfile(fp):
os.remove(fp)
#log.debug('del' + fp)
elif os.path.isdir(fp):
shutil.rmtree(fp)
logfiles =['trader.csv', 'balance.csv', 'trades.csv', 'strategy.csv', 'main.log',]
for logfile in logfiles:
with open(os.path.join(logdir, logfile), 'w'):
pass
#print logdir, lsdir
#shutil.rmtree(logdir)
#os.mkdir(logdir)
if __name__ == "__main__":
#XAGUSD1440_FLUC, XAGUSD1440_UP, XAGUSD1440_DOWN, XAGUSD1440_V, XAGUSD1440_RV,
#XAGUSD1440_FLAT, XAGUSD1440_FLU
#XAGUSD1440_2013, XAGUSD1440_2012, XAGUSD1440_2011, XAGUSD1440_ALL, XAGUSD1440_AFTER08
clearLog()
prices = dataloader.importToArray('XAUUSD60_20122013')
maStrategy.runStrategy(prices)
#s6Trader.runStrategy(prices)
#l5Trader.runStrategy(prices)
#maTrader.runStrategy(prices)
#drawStats(prices)
#oneTrader.runStrategy(prices)
#part = prices[109:]
#ps = [p['close'] for p in part]
#ps.reverse()
#pr = [p['rmb'] for p in part]
#pr.reverse()
#
#for i in range(len(part)):
# part[i]['close'] = ps[i]
# part[i]['rmb'] = pr[i]
#maTrader.runStrategy(prices, 0)
#drawStats(prices)
#kdjTrader.runStrategy(prices)
#bollingTrader.runStrategy(prices)
#maTrader.runStrategy(prices, 112) #XAGUSD1440_FLAT
#maTrader.runStrategy(prices, 109) #XAGUSD1440_FLU
#rsiTrader.runStrategy(prices)
#macdTrader.runStrategy(prices)
#importAll()
#importTable('XAGUSD1440')
#maTrader.runStrategy('XAGUSD1440')
#bollingTrader.runStrategy('XAGUSD1440')
#calculateIndicators('XAGUSD1440')
#strategyMA()
#strategyBolling()
| mit |
ningchi/scikit-learn | sklearn/kernel_ridge.py | 17 | 6479 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True)
n_samples = X.shape[0]
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
w4nderlust/lingua-politicae | ml/fb2graph.py | 1 | 16718 | import sys
import json
from os import listdir
import unidecode
from time import time
from collections import defaultdict
from ascii_graph import Pyasciigraph
from os.path import isfile, join, basename
from sklearn.cluster import SpectralClustering, KMeans
from sklearn.metrics import silhouette_score, calinski_harabaz_score
from tabulate import tabulate
from tweetokenize import Tokenizer
import numpy as np
from scipy import stats
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from globals import POLITICIANS_INFO_FILE_PATH, FACEBOOK_POSTS_DIRECTORY, POLITICIANS_GRAPH_FILE_PATH
stopwords_file_path = 'ml/res/italian_stopwords_big.txt'
english_stopwords_file_path = 'ml/res/english_stopwords.txt'
tweet_stopwords = ['URL', 'ELLIPSIS', 'NUMBER', 'USERNAME']
terms_per_node = 5
terms_per_edge = 5
num_clusters_range = range(2, 15)
clustering_algorithm = KMeans
clustering_quality_measure = calinski_harabaz_score
politicians_info = {}
with open(POLITICIANS_INFO_FILE_PATH, 'r') as politicians_info_file:
politicians_info_list = json.load(politicians_info_file)
for politician in politicians_info_list:
politicians_info[politician['facebook']] = politician
# Utils
def file2name(filename):
return filename.replace('_facebook', '').replace('.json', '')
def tokenize(text):
tokens = [token for token in tokenizer.tokenize(unidecode.unidecode(text.replace("'", " "))) if len(token) > 2]
return tokens
def term_scores(vectorizer, matrix):
# http://stackoverflow.com/questions/16078015/
matrix_binary = matrix.copy()
matrix_binary[matrix_binary > 0] = 1
matrix_binary = matrix_binary.astype(np.int32)
scores = zip(vectorizer.get_feature_names(),
np.asarray(matrix.sum(axis=0)).ravel(),
np.asarray(matrix_binary.sum(axis=0)).ravel(),
vectorizer.idf_)
sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)
return sorted_scores
# Initialization
print("Initializing...")
t0 = time()
with open(stopwords_file_path) as f:
stopwords_list = f.readlines()
with open(english_stopwords_file_path) as f:
english_stopwords_list = f.readlines()
stopwords = [word.strip() for word in stopwords_list if word.strip()] + [word.strip() for word in english_stopwords_list
if word.strip()] + tweet_stopwords
politicians_sorted = sorted(list(politicians_info.keys()))
politician_posts = defaultdict(list)
post_list = []
tokenizer = Tokenizer()
print("done in {:0.4f}s".format(time() - t0))
# Collect tweets from JSON
print("Collecting posts...")
t0 = time()
posts_so_far = 0
only_jsons = [f for f in listdir(FACEBOOK_POSTS_DIRECTORY) if
isfile(join(FACEBOOK_POSTS_DIRECTORY, f)) and f.endswith('.json') and not f == basename(POLITICIANS_INFO_FILE_PATH)]
for post_file in only_jsons:
with open(join(FACEBOOK_POSTS_DIRECTORY, post_file)) as tf:
posts = json.load(tf)
for post in posts:
if 'message' in post:
post_list.append(unidecode.unidecode(post['message']))
politician_posts[file2name(post_file)].append(posts_so_far)
posts_so_far += 1
print("done in {:0.4f}s".format(time() - t0))
# print(tweet_list)
# print(politician_posts)
# Collect politician names
# Calculating tf-idf features
print("Calculating vectorization...")
t0 = time()
# vectorizer = CountVectorizer(max_df=0.95, min_df=5,
# max_features=50000,
# stop_words=stopwords,
# tokenizer=tokenize)
vectorizer = TfidfVectorizer(max_df=0.95, min_df=5,
max_features=10000,
stop_words=stopwords,
tokenizer=tokenize)
matrix = vectorizer.fit_transform(post_list)
print("done in {:0.4f}s".format(time() - t0))
# print('tfidf:')
# print(tfidf)
# Show tfidf matrix stats
vocab = vectorizer.get_feature_names()
print("Vocabulary:")
print(vocab)
vocab_max_len = max(map(len, vocab))
scores = term_scores(vectorizer, matrix)
print("Word scores:")
print(tabulate(scores, headers=['Word', 'Score', 'DF', 'IDF']))
'''
vec = tfidf_matrix.named_steps['vec']
features = vec.get_feature_names()
def top_tfidf_feats(row, features, top_n=25):
# Get top n tfidf values in row and return them with their corresponding feature names.
topn_ids = np.argsort(row)[::-1][:top_n]
top_feats = [(features[i], row[i]) for i in topn_ids]
df = pd.DataFrame(top_feats)
df.columns = ['feature', 'tfidf']
return df
def top_feats_in_doc(tfidf_matrix, features, row_id, top_n=25):
# Top tfidf features in specific document (matrix row)
row = np.squeeze(tfidf_matrix[row_id].toarray())
return top_tfidf_feats(row, features, top_n)
def top_mean_feats(tfidf_matrix, features, grp_ids=None, min_tfidf=0.1, top_n=25):
# Return the top n features that on average are most important amongst documents in rows
# indentified by indices in grp_ids.
if grp_ids:
D = tfidf_matrix[grp_ids].toarray()
else:
D = tfidf_matrix.toarray()
D[D < min_tfidf] = 0
tfidf_means = np.mean(D, axis=0)
return top_tfidf_feats(tfidf_means, features, top_n)
print(top_mean_feats(tfidf_matrix, features))
'''
# Calculating politicians prototypes
print("Calculating politicians prototypes...")
t0 = time()
politician_proto = {}
for politician, tweet_ids in politician_posts.items():
proto = np.zeros(matrix.shape[1])
for tweet_id in tweet_ids:
proto += matrix[tweet_id]
politician_proto[politician] = proto
print("done in {:0.4f}s".format(time() - t0))
# print(politician_proto)
# Calculating similarities
print("Calculating similarities...")
t0 = time()
similarity_matrix = {}
for i in range(len(politicians_sorted)):
for j in range(i + 1, len(politicians_sorted)):
proto_politician_i = politician_proto[politicians_sorted[i]]
proto_politician_j = politician_proto[politicians_sorted[j]]
cosine_similarity = np.dot(proto_politician_i, proto_politician_j.T) / \
(np.linalg.norm(proto_politician_i) * np.linalg.norm(proto_politician_j))
similarity_matrix[(politicians_sorted[i], politicians_sorted[j])] = np.asscalar(cosine_similarity)
print("done in {:0.4f}s".format(time() - t0))
print('similarity_matrix:')
print(sorted(similarity_matrix.items(), key=lambda x: -x[1]))
print('similarity matrix scores distribution:')
scores = np.array([v for v in similarity_matrix.values()])
min_scores = np.min(scores)
percentile10_scores = np.percentile(scores, 10)
percentile25_scores = np.percentile(scores, 25)
mean_scores = np.mean(scores)
median_scores = np.median(scores)
percentile75_scores = np.percentile(scores, 75)
percentile90_scores = np.percentile(scores, 90)
max_scores = np.max(scores)
std_scores = np.std(scores)
frequency, values = np.histogram(scores, bins=20)
scores_ranges = []
last = None
for value in values:
if last is None:
last = [value, None]
elif last[1] is None:
last[1] = value
scores_ranges.append(last)
else:
last = [last[1], value]
scores_ranges.append(last)
ascii_graph = Pyasciigraph()
for line in ascii_graph.graph('Scores distribution',
zip(["[{:0.4f},{:0.4f}]".format(r[0], r[1]) for r in scores_ranges], frequency)):
print(line)
print(
"min: {:0.4f}, 10th: {:0.4f}, 25th: {:0.4f}, mean: {:0.4f}, median: {:0.4f}, 75th: {:0.4f}, 90th: {:0.4f}, max: {:0.4f}, std: {:0.4f}".format(
min_scores, percentile10_scores, percentile25_scores, mean_scores,
median_scores, percentile75_scores, percentile90_scores, max_scores, std_scores))
# Calculating terms per edge
print("Calculating terms per edge...")
t0 = time()
def weighted_distance(a, b):
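    # Elementwise dissimilarity between two tf-idf weight vectors: equals
    # 1/sqrt((a+1)*(b+1)) where the weights agree and grows with |a - b|;
    # the +1 offsets avoid division by zero for unused terms.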
return (1 + np.absolute(a - b)) / np.sqrt((a + 1) * (b + 1))
terms_per_edge_matrix = {}
for i in range(len(politicians_sorted)):
for j in range(i + 1, len(politicians_sorted)):
# proto_politician_i = np.squeeze(np.asarray(politician_proto[politicians_sorted[i]]))
# proto_politician_i /= np.linalg.norm(proto_politician_i)
#
# proto_politician_j = np.squeeze(np.asarray(politician_proto[politicians_sorted[j]]))
# proto_politician_j /= np.linalg.norm(proto_politician_j)
#
# distance = np.absolute(proto_politician_i - proto_politician_j)
# sorted_word_ids = np.argsort(distance)
#
# sorted_proto_politician_i = proto_politician_i[sorted_word_ids]
# sorted_proto_politician_j = proto_politician_j[sorted_word_ids]
#
# words_used_by_politician_i = sorted_proto_politician_i != 0
# words_used_by_politician_j = sorted_proto_politician_j != 0
# words_used_by_both = np.logical_and(words_used_by_politician_i, words_used_by_politician_j)
#
# filtered_sorted_word_ids = sorted_word_ids[words_used_by_both]
#
# # normalization
# filtered_distance = distance[filtered_sorted_word_ids]
# z_normalized_filtered_distance = stats.zscore(filtered_distance)
#
# most_similar_weights = -z_normalized_filtered_distance[:terms_per_edge]
# most_similar_word_ids = filtered_sorted_word_ids[:terms_per_edge]
# most_similar_words = [vocab[i] for i in most_similar_word_ids]
# most_similar = list(zip(most_similar_words, most_similar_weights))
#
# most_different_weights = -z_normalized_filtered_distance[-terms_per_edge:]
# most_different_word_ids = filtered_sorted_word_ids[-terms_per_edge:]
# most_different_words = [vocab[i] for i in most_different_word_ids]
# most_different = list(zip(most_different_words, most_different_weights))
#
# terms_per_edge_matrix[(politicians_sorted[i], politicians_sorted[j])] = {"most_similar": most_similar,
# "most_different": most_different}
proto_politician_i = np.squeeze(np.asarray(politician_proto[politicians_sorted[i]]))
proto_politician_j = np.squeeze(np.asarray(politician_proto[politicians_sorted[j]]))
difference = proto_politician_i - proto_politician_j
distance = weighted_distance(proto_politician_i, proto_politician_j)
z_normalized_difference = stats.zscore(difference)
sorted_difference_word_ids = np.argsort(difference)
sorted_distance_word_ids = np.argsort(distance)
# get top k similar words
sorted_proto_politician_i = proto_politician_i[sorted_distance_word_ids]
sorted_proto_politician_j = proto_politician_j[sorted_distance_word_ids]
words_used_by_politician_i = sorted_proto_politician_i != 0
words_used_by_politician_j = sorted_proto_politician_j != 0
words_used_by_both = np.logical_and(words_used_by_politician_i, words_used_by_politician_j)
filtered_sorted_word_ids = sorted_distance_word_ids[words_used_by_both]
filtered_sorted_z_normalized_difference = z_normalized_difference[filtered_sorted_word_ids]
most_correlated_with_both_weights = filtered_sorted_z_normalized_difference[:terms_per_edge]
most_correlated_with_both_word_ids = filtered_sorted_word_ids[:terms_per_edge]
most_correlated_with_both_words = [vocab[i] for i in most_correlated_with_both_word_ids]
most_correlated_with_both = list(zip(most_correlated_with_both_words, most_correlated_with_both_weights))
# get dissimilar words
sorted_z_normalized_difference = z_normalized_difference[sorted_difference_word_ids]
most_correlated_with_i_weights = sorted_z_normalized_difference[-terms_per_edge:]
most_correlated_with_i_ids = sorted_difference_word_ids[-terms_per_edge:]
most_correlated_with_i_words = [vocab[i] for i in most_correlated_with_i_ids]
most_correlated_with_i = list(zip(most_correlated_with_i_words, most_correlated_with_i_weights))
most_correlated_with_j_weights = sorted_z_normalized_difference[:terms_per_edge]
most_correlated_with_j_ids = sorted_difference_word_ids[:terms_per_edge]
most_correlated_with_j_words = [vocab[i] for i in most_correlated_with_j_ids]
most_correlated_with_j = list(zip(most_correlated_with_j_words, most_correlated_with_j_weights))
terms_per_edge_matrix[(politicians_sorted[i], politicians_sorted[j])] = {
"most_correlated_with_both": most_correlated_with_both,
"most_correlated_with_source": most_correlated_with_i,
"most_correlated_with_target": most_correlated_with_j}
print("done in {:0.4f}s".format(time() - t0))
# Calculating terms per node
print("Calculating terms per node...")
t0 = time()
terms_per_node_matrix = {}
for i in range(len(politicians_sorted)):
proto_politician_i = np.squeeze(np.asarray(politician_proto[politicians_sorted[i]]))
sorted_distance_word_ids = np.argsort(proto_politician_i)
sorted_proto_politician_i = proto_politician_i[sorted_distance_word_ids]
words_used_by_politician_i = np.logical_not(sorted_proto_politician_i == 0)
filtered_sorted_word_ids = sorted_distance_word_ids[words_used_by_politician_i]
most_important_word_ids = filtered_sorted_word_ids[-terms_per_node:]
most_important_weights = proto_politician_i[most_important_word_ids]
most_important_words = [vocab[i] for i in most_important_word_ids]
most_important = list(zip(most_important_words, most_important_weights))
terms_per_node_matrix[politicians_sorted[i]] = {"most_important": most_important}
print("done in {:0.4f}s".format(time() - t0))
# Calculating clusters and finding the best scoring number of them
print("Calculating clustering...")
t0 = time()
politician_vectors = []
for politician in politicians_sorted:
politician_vectors.append(politician_proto[politician] / np.linalg.norm(politician_proto[politician]))
politicians_matrix = np.stack(politician_vectors)
quality_avgs = []
max_quality = -1
best_model = None
best_cluster_labels = None
best_cluster_distances = None
for num_clusters in num_clusters_range:
# Initialize the model with num_clusters value and a random generator
# seed of 10 for reproducibility.
model = clustering_algorithm(n_clusters=num_clusters, random_state=10)
cluster_labels = model.fit_predict(politicians_matrix)
if getattr(model, "transform", None) is not None:
unnorm_cluster_distances = model.transform(politicians_matrix)
cluster_distances = unnorm_cluster_distances / np.expand_dims(unnorm_cluster_distances.sum(axis=1), axis=1)
else:
cluster_distances = None
quality_score = clustering_quality_measure(politicians_matrix, cluster_labels)
quality_avgs.append([num_clusters, quality_score])
if quality_score > max_quality:
max_quality = quality_score
best_model = model
best_cluster_labels = cluster_labels
best_cluster_distances = cluster_distances
ascii_graph = Pyasciigraph(float_format='{0:0.4f}')
for line in ascii_graph.graph(clustering_quality_measure.__name__, quality_avgs):
print(line)
print("done in {:0.4f}s".format(time() - t0))
# Building politicians graph
print("Building politicians graph...")
t0 = time()
nodes = []
for i, politician in enumerate(politicians_sorted):
nodes.append(
{'name': politicians_info[politician]['name'],
'party': politicians_info[politician]['party'],
'twitter': politicians_info[politician]['twitter'],
'tweets': len(politician_posts[politician]),
'cluster': np.asscalar(best_cluster_labels[i]),
'cluster_distances': best_cluster_distances[i].tolist() if best_cluster_distances is not None else None,
'most_important_words': list(reversed(terms_per_node_matrix[politician]['most_important']))})
edges = []
for i in range(len(politicians_sorted)):
for j in range(i + 1, len(politicians_sorted)):
edges.append({'source': i, 'target': j,
'weight': similarity_matrix[(politicians_sorted[i], politicians_sorted[j])],
'words': terms_per_edge_matrix[(politicians_sorted[i], politicians_sorted[j])]})
graph = {'nodes': nodes, 'edges': edges}
print("done in {:0.4f}s".format(time() - t0))
print('graph:')
print(graph)
# Saving politicians graph
print("Saving politicians graph...")
t0 = time()
with open(POLITICIANS_GRAPH_FILE_PATH, 'w') as gf:
json.dump(graph, gf)
print("done in {:0.4f}s".format(time() - t0))
| apache-2.0 |
broadinstitute/cms | docs/conf.py | 1 | 10185 | # -*- coding: utf-8 -*-
#
# CMS documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 14:12:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
print "PATH: {}".format(os.path.dirname(os.path.abspath('../cms/cms/')))
sys.path.insert(0, os.path.dirname(os.path.abspath('../cms/cms/')))
# -- Mock out the heavyweight pip packages, esp those that require C ----
import mock
MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'scipy.stats.kde', 'matplotlib', 'matplotlib.pyplot', 'pysam',
'Bio', 'Bio.AlignIO', 'Bio.SeqIO', 'Bio.Data.IUPACData']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- Obtain GIT version --
import subprocess
def _git_version():
cmd = ['git', 'describe', '--tags', '--always'] # omit "--dirty" from doc build
out = subprocess.check_output(cmd)
if type(out) != str:
out = out.decode('utf-8')
return out.strip()
__version__ = _git_version()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CMS'
copyright = u'2015, Broad Institute'
author = u'Broad Institute'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
release = __version__
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d. {}'.format(release)
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CMSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CMS.tex', u'CMS Documentation',
u'Broad Institute', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cms', u'CMS Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CMS', u'CMS Documentation',
author, 'CMS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-2-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/backend_cairo.py | 8 | 17693 | """
A Cairo backend for matplotlib
Author: Steve Chaplin
Cairo is a vector graphics library with cross-device output support.
Features of Cairo:
* anti-aliasing
* alpha channel
* saves image files as PNG, PostScript, PDF
http://cairographics.org
Requires (in order, all available from Cairo website):
cairo, pycairo
Naming Conventions
* classes MixedUpperCase
  * variables lowerUpper
* functions underscore_separated
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os, sys, warnings, gzip
import numpy as np
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
import cairocffi as cairo
except ImportError:
try:
import cairo
except ImportError:
raise ImportError("Cairo backend requires that cairocffi or pycairo is installed.")
else:
HAS_CAIRO_CFFI = False
else:
HAS_CAIRO_CFFI = True
_version_required = (1,2,0)
if cairo.version_info < _version_required:
raise ImportError ("Pycairo %d.%d.%d is installed\n"
"Pycairo %d.%d.%d or later is required"
% (cairo.version_info + _version_required))
backend_version = cairo.version
del _version_required
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox, Affine2D
from matplotlib.font_manager import ttfFontProperty
_debug = False
#_debug = True
# Image::color_conv(format) for draw_image()
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
class RendererCairo(RendererBase):
fontweights = {
100 : cairo.FONT_WEIGHT_NORMAL,
200 : cairo.FONT_WEIGHT_NORMAL,
300 : cairo.FONT_WEIGHT_NORMAL,
400 : cairo.FONT_WEIGHT_NORMAL,
500 : cairo.FONT_WEIGHT_NORMAL,
600 : cairo.FONT_WEIGHT_BOLD,
700 : cairo.FONT_WEIGHT_BOLD,
800 : cairo.FONT_WEIGHT_BOLD,
900 : cairo.FONT_WEIGHT_BOLD,
'ultralight' : cairo.FONT_WEIGHT_NORMAL,
'light' : cairo.FONT_WEIGHT_NORMAL,
'normal' : cairo.FONT_WEIGHT_NORMAL,
'medium' : cairo.FONT_WEIGHT_NORMAL,
'semibold' : cairo.FONT_WEIGHT_BOLD,
'bold' : cairo.FONT_WEIGHT_BOLD,
'heavy' : cairo.FONT_WEIGHT_BOLD,
'ultrabold' : cairo.FONT_WEIGHT_BOLD,
'black' : cairo.FONT_WEIGHT_BOLD,
}
fontangles = {
'italic' : cairo.FONT_SLANT_ITALIC,
'normal' : cairo.FONT_SLANT_NORMAL,
'oblique' : cairo.FONT_SLANT_OBLIQUE,
}
def __init__(self, dpi):
"""
"""
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self.dpi = dpi
self.gc = GraphicsContextCairo (renderer=self)
self.text_ctx = cairo.Context (
cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))
self.mathtext_parser = MathTextParser('Cairo')
RendererBase.__init__(self)
def set_ctx_from_surface (self, surface):
self.gc.ctx = cairo.Context (surface)
def set_width_height(self, width, height):
self.width = width
self.height = height
self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)
# use matrix_flipy for ALL rendering?
# - problem with text? - will need to switch matrix_flipy off, or do a
# font transform?
def _fill_and_stroke (self, ctx, fill_c, alpha, alpha_overrides):
if fill_c is not None:
ctx.save()
if len(fill_c) == 3 or alpha_overrides:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)
else:
ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], fill_c[3])
ctx.fill_preserve()
ctx.restore()
ctx.stroke()
@staticmethod
def convert_path(ctx, path, transform, clip=None):
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
ctx.move_to(*points)
elif code == Path.CLOSEPOLY:
ctx.close_path()
elif code == Path.LINETO:
ctx.line_to(*points)
elif code == Path.CURVE3:
ctx.curve_to(points[0], points[1],
points[0], points[1],
points[2], points[3])
elif code == Path.CURVE4:
ctx.curve_to(*points)
def draw_path(self, gc, path, transform, rgbFace=None):
ctx = gc.ctx
# We'll clip the path to the actual rendering extents
# if the path isn't filled.
if rgbFace is None and gc.get_hatch() is None:
clip = ctx.clip_extents()
else:
clip = None
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0, self.height)
ctx.new_path()
self.convert_path(ctx, path, transform, clip)
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())
def draw_image(self, gc, x, y, im):
# bbox - not currently used
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
rows, cols, buf = im.color_conv (BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data (
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
ctx = gc.ctx
y = self.height - y - rows
ctx.save()
ctx.set_source_surface (surface, x, y)
if gc.get_alpha() != 1.0:
ctx.paint_with_alpha(gc.get_alpha())
else:
ctx.paint()
ctx.restore()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# Note: x,y are device/display coords, not user-coords, unlike other
# draw_* methods
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
ctx = gc.ctx
ctx.new_path()
ctx.move_to (x, y)
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
size = prop.get_size_in_points() * self.dpi / 72.0
ctx.save()
if angle:
ctx.rotate (-angle * np.pi / 180)
ctx.set_font_size (size)
if HAS_CAIRO_CFFI:
if not isinstance(s, six.text_type):
s = six.text_type(s)
else:
if not six.PY3 and isinstance(s, six.text_type):
s = s.encode("utf-8")
ctx.show_text(s)
ctx.restore()
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
ctx = gc.ctx
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.dpi, prop)
ctx.save()
ctx.translate(x, y)
if angle:
ctx.rotate (-angle * np.pi / 180)
for font, fontsize, s, ox, oy in glyphs:
ctx.new_path()
ctx.move_to(ox, oy)
fontProp = ttfFontProperty(font)
ctx.save()
ctx.select_font_face (fontProp.name,
self.fontangles [fontProp.style],
self.fontweights[fontProp.weight])
size = fontsize * self.dpi / 72.0
ctx.set_font_size(size)
if not six.PY3 and isinstance(s, six.text_type):
s = s.encode("utf-8")
ctx.show_text(s)
ctx.restore()
for ox, oy, w, h in rects:
ctx.new_path()
ctx.rectangle (ox, oy, w, h)
ctx.set_source_rgb (0, 0, 0)
ctx.fill_preserve()
ctx.restore()
def flipy(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return True
#return False # tried - all draw objects ok except text (and images?)
# which comes out mirrored!
def get_canvas_width_height(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
if ismath:
width, height, descent, fonts, used_characters = self.mathtext_parser.parse(
s, self.dpi, prop)
return width, height, descent
ctx = self.text_ctx
ctx.save()
ctx.select_font_face (prop.get_name(),
self.fontangles [prop.get_style()],
self.fontweights[prop.get_weight()])
# Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c
# but if /96.0 is used the font is too small
size = prop.get_size_in_points() * self.dpi / 72.0
# problem - scale remembers last setting and font can become
# enormous causing program to crash
# save/restore prevents the problem
ctx.set_font_size (size)
y_bearing, w, h = ctx.text_extents (s)[1:4]
ctx.restore()
return w, h, h + y_bearing
def new_gc(self):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
self.gc.ctx.save()
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def points_to_pixels(self, points):
if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))
return points/72.0 * self.dpi
class GraphicsContextCairo(GraphicsContextBase):
_joind = {
'bevel' : cairo.LINE_JOIN_BEVEL,
'miter' : cairo.LINE_JOIN_MITER,
'round' : cairo.LINE_JOIN_ROUND,
}
_capd = {
'butt' : cairo.LINE_CAP_BUTT,
'projecting' : cairo.LINE_CAP_SQUARE,
'round' : cairo.LINE_CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
def restore(self):
self.ctx.restore()
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
rgb = self._rgb
if self.get_forced_alpha():
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], _alpha)
else:
self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], rgb[3])
#def set_antialiased(self, b):
# enable/disable anti-aliasing is not (yet) supported by Cairo
def set_capstyle(self, cs):
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
self.ctx.set_line_cap (self._capd[cs])
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
if not rectangle: return
x,y,w,h = rectangle.bounds
# pixel-aligned clip-regions are faster
x,y,w,h = round(x), round(y), round(w), round(h)
ctx = self.ctx
ctx.new_path()
ctx.rectangle (x, self.renderer.height - h - y, w, h)
ctx.clip ()
def set_clip_path(self, path):
if not path: return
tpath, affine = path.get_transformed_path_and_affine()
ctx = self.ctx
ctx.new_path()
affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)
RendererCairo.convert_path(ctx, tpath, affine)
ctx.clip()
def set_dashes(self, offset, dashes):
self._dashes = offset, dashes
        if dashes is None:
self.ctx.set_dash([], 0) # switch dashes off
else:
self.ctx.set_dash(
list(self.renderer.points_to_pixels(np.asarray(dashes))), offset)
def set_foreground(self, fg, isRGBA=None):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
if len(self._rgb) == 3:
self.ctx.set_source_rgb(*self._rgb)
else:
self.ctx.set_source_rgba(*self._rgb)
def get_rgb(self):
return self.ctx.get_source().get_rgba()[:3]
def set_joinstyle(self, js):
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
self.ctx.set_line_join(self._joind[js])
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
self._linewidth = float(w)
self.ctx.set_line_width (self.renderer.points_to_pixels(w))
def new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py
"""
Create a new figure manager instance
"""
if _debug: print('%s()' % (_fn_name()))
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasCairo(figure)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasCairo (FigureCanvasBase):
def print_png(self, fobj, *args, **kwargs):
width, height = self.get_width_height()
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width, height)
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)
renderer.set_ctx_from_surface (surface)
self.figure.draw (renderer)
surface.write_to_png (fobj)
def print_pdf(self, fobj, *args, **kwargs):
return self._save(fobj, 'pdf', *args, **kwargs)
def print_ps(self, fobj, *args, **kwargs):
return self._save(fobj, 'ps', *args, **kwargs)
def print_svg(self, fobj, *args, **kwargs):
return self._save(fobj, 'svg', *args, **kwargs)
def print_svgz(self, fobj, *args, **kwargs):
return self._save(fobj, 'svgz', *args, **kwargs)
def _save (self, fo, format, **kwargs):
# save PDF/PS/SVG
orientation = kwargs.get('orientation', 'portrait')
dpi = 72
self.figure.dpi = dpi
w_in, h_in = self.figure.get_size_inches()
width_in_points, height_in_points = w_in * dpi, h_in * dpi
if orientation == 'landscape':
width_in_points, height_in_points = (height_in_points,
width_in_points)
if format == 'ps':
if not hasattr(cairo, 'PSSurface'):
raise RuntimeError ('cairo has not been compiled with PS '
'support enabled')
surface = cairo.PSSurface (fo, width_in_points, height_in_points)
elif format == 'pdf':
if not hasattr(cairo, 'PDFSurface'):
raise RuntimeError ('cairo has not been compiled with PDF '
'support enabled')
surface = cairo.PDFSurface (fo, width_in_points, height_in_points)
elif format in ('svg', 'svgz'):
if not hasattr(cairo, 'SVGSurface'):
raise RuntimeError ('cairo has not been compiled with SVG '
'support enabled')
if format == 'svgz':
if is_string_like(fo):
fo = gzip.GzipFile(fo, 'wb')
else:
fo = gzip.GzipFile(None, 'wb', fileobj=fo)
surface = cairo.SVGSurface (fo, width_in_points, height_in_points)
else:
warnings.warn ("unknown format: %s" % format)
return
# surface.set_dpi() can be used
renderer = RendererCairo (self.figure.dpi)
renderer.set_width_height (width_in_points, height_in_points)
renderer.set_ctx_from_surface (surface)
ctx = renderer.gc.ctx
if orientation == 'landscape':
ctx.rotate (np.pi/2)
ctx.translate (0, -height_in_points)
# cairo/src/cairo_ps_surface.c
# '%%Orientation: Portrait' is always written to the file header
# '%%Orientation: Landscape' would possibly cause problems
# since some printers would rotate again ?
# TODO:
# add portrait/landscape checkbox to FileChooser
self.figure.draw (renderer)
show_fig_border = False # for testing figure orientation and scaling
if show_fig_border:
ctx.new_path()
ctx.rectangle(0, 0, width_in_points, height_in_points)
ctx.set_line_width(4.0)
ctx.set_source_rgb(1,0,0)
ctx.stroke()
ctx.move_to(30,30)
ctx.select_font_face ('sans-serif')
ctx.set_font_size(20)
ctx.show_text('Origin corner')
ctx.show_page()
surface.finish()
if format == 'svgz':
fo.close()
FigureCanvas = FigureCanvasCairo
| mit |
dimriou/rupture | etc/theory/experiments/ctx_performance/plot.py | 4 | 1876 | import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
font = {
'size': 12
}
plt.rc('font', **font)
# Origins plot
origins = []
size = []
with open('origins', 'r') as f:
for l in f.readlines()[1:]:
o, s = l.strip('\n').split()
origins.append(int(o))
size.append(float(s))
plt.title('Size overhead per Number of Origins', y=1.06)
plt.xlabel('Origins')
plt.ylabel('Size overhead (%)')
plt.plot(origins, size)
plt.savefig('origins.png')
# Protected coverage plot
fig, ax1 = plt.subplots()
fig.suptitle('Size & Time overhead per Protected Coverage')
ax2 = ax1.twinx()
ax1.set_xlabel('Secret rate (%)')
ax1.set_ylabel('Size overhead (%)')
ax2.set_ylabel('Time overhead (ms)')
secret_rate = []
size = []
time = []
with open('protected_coverage', 'r') as f:
for l in f.readlines()[1:]:
se, si, t = l.strip('\n').split()
secret_rate.append(int(se))
size.append(float(si))
time.append(float(t))
ax1.plot(secret_rate, size)
plt.plot(secret_rate, time)
ax2.plot(secret_rate, time)
plt.legend(['Size', 'Time'])
plt.savefig('response_secrets.png')
# Total response plot
fig, ax1 = plt.subplots()
fig.suptitle('Size overhead per Total Response')
ax2 = ax1.twinx()
ax1.set_xlabel('Uncompressed size (KB)')
ax1.set_ylabel('Size overhead (%)')
ax2.set_ylabel('Size overhead (KB)')
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
uncompressed = []
size_perc = []
size_kb = []
with open('total_response', 'r') as f:
for l in f.readlines()[1:]:
u, sp, sk = l.strip('\n').split()
uncompressed.append(float(u))
size_perc.append(float(sp))
size_kb.append(float(sk))
ax1.plot(uncompressed, size_perc)
plt.plot(uncompressed, size_kb)
ax2.plot(uncompressed, size_kb)
plt.legend(['% overhead', 'KB overhead'])
plt.savefig('total_response.png')
| mit |
wkerzendorf/wsynphot | wsynphot/data/hst/acs/convert_filters.py | 1 | 1562 | #Reading HST ACS filters
from astropy import units as u, constants as const
from numpy import genfromtxt, asscalar
import pandas as pd
import os
from glob import glob
def read_hst_filter(fname):
"""
    Reading an HST ACS filter file into a dataframe
Parameters
----------
fname: ~str
path to file to be read
"""
data = pd.DataFrame(genfromtxt(fname, usecols=(0, 1)),
columns=['wavelength', 'transmission_lambda'])
return data
def read_dataset(fname_list, prefix, name_parser=None):
"""
Reading a whole list of filters
Parameters
----------
fname_list: list
list of filenames
prefix: str
        prefix for the dictionary keys
    name_parser: callable, optional
        function mapping a filename to the filter name used in the dictionary
        key; if None, the filename itself is used
Returns
-------
dict
"""
filter_dict = {}
for fname in fname_list:
if name_parser is not None:
filter_name = name_parser(fname)
else:
filter_name = fname
filter_path = os.path.join(prefix, filter_name)
filter_dict[filter_path] = read_hst_filter(fname)
return filter_dict
def read_all_hst():
hst_nameparser = (
lambda fname: os.path.basename(
fname).lower().split('_')[1].replace('.dat', ''))
hst_filters = read_dataset(glob('filter_data/*.dat'), 'hst/acs/wfc',
hst_nameparser)
return hst_filters
def save_to_hdf(filter_dict, hdf_file, mode='a'):
fh = pd.HDFStore(hdf_file, mode=mode)
for key in filter_dict:
filter_dict[key].to_hdf(fh, key)
fh.close()
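if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: the glob pattern in
    # read_all_hst ('filter_data/*.dat') and the output file name below are
    # assumptions; writing HDF5 via pandas also needs the optional 'tables' package.
    hst_filters = read_all_hst()
    save_to_hdf(hst_filters, 'hst_acs_filters.h5', mode='w')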
| bsd-3-clause |
lensacom/sparkit-learn | splearn/linear_model/tests/test_logistic.py | 2 | 1212 | import numpy as np
from sklearn.linear_model import LogisticRegression
from splearn.linear_model import SparkLogisticRegression
from splearn.utils.testing import SplearnTestCase, assert_array_almost_equal
class TestLogisticRegression(SplearnTestCase):
def test_same_coefs(self):
X, y, Z = self.make_classification(2, 10000)
local = LogisticRegression(tol=1e-4, C=10)
dist = SparkLogisticRegression(tol=1e-4, C=10)
local.fit(X, y)
dist.fit(Z, classes=np.unique(y))
converted = dist.to_scikit()
assert_array_almost_equal(local.coef_, dist.coef_, decimal=1)
assert_array_almost_equal(local.coef_, converted.coef_, decimal=1)
def test_same_prediction(self):
X, y, Z = self.make_classification(2, 100000)
local = LogisticRegression(tol=1e-4, C=10)
dist = SparkLogisticRegression(tol=1e-4, C=10)
y_local = local.fit(X, y).predict(X)
y_dist = dist.fit(Z, classes=local.classes_).predict(Z[:, 'X'])
y_converted = dist.to_scikit().predict(X)
assert (sum(y_local != y_dist.toarray()) < len(y_local) * 1. / 100.)
assert (sum(y_local != y_converted) < len(y_local) * 1. / 100.)
| apache-2.0 |
rodluger/planetplanet | setup.py | 1 | 3520 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from setuptools import setup, find_packages, Extension
import glob
import sysconfig
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if suffix is None:
suffix = ".so"
# Hackishly inject a constant into builtins to enable importing of the
# module in "setup" mode. Stolen from `kplr`
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__PLANETPLANET_SETUP__ = True
import planetplanet
# PLANETPLANET C EXTENSION. Borrowing heavily from REBOUND here.
if sys.platform == 'darwin':
from distutils import sysconfig
vars = sysconfig.get_config_vars()
vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '-shared')
extra_link_args=['-L/usr/local/lib',
'-Wl,-install_name,@rpath/libppo' + suffix]
else:
extra_link_args=['-L/usr/local/lib']
libppomodule = Extension('libppo',
sources = glob.glob('rebound/src/*.c') + \
['progress/progress.c',
'planetplanet/photo/orbit.c',
'planetplanet/photo/eyeball.c',
'planetplanet/photo/ppo.c',
],
include_dirs = ['rebound/src/',
'progress/',
'planetplanet/photo/',
'/usr/local/include'],
define_macros=[ ('LIBREBOUND', None) ],
extra_compile_args=['-Wall', '-I/usr/local/include',
'-fstrict-aliasing', '-O3', '-std=c99',
'-Wno-unknown-pragmas', '-DLIBREBOUND',
'-D_GNU_SOURCE', '-fPIC'],
extra_link_args=extra_link_args,
libraries=['gsl', 'gslcblas', 'm']
)
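# For local development one would typically build the C extension in place with
# something like: python setup.py build_ext --inplace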
long_description = \
"A photodynamical code that computes transits, eclipses, phase curves, " + \
"and planet-planet/planet-moon occultations in planetary systems."
# Setup!
setup(name = 'planetplanet',
version = planetplanet.__version__,
description = 'Photodynamical code for planet-planet occultations',
long_description = long_description,
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Astronomy',
],
url = 'http://github.com/rodluger/planetplanet',
author = 'Rodrigo Luger',
author_email = '[email protected]',
license = 'GPL',
packages = ['planetplanet', 'planetplanet.photo', 'planetplanet.detect'],
install_requires = [
'numpy>=1.8',
'scipy',
'matplotlib',
'six',
'tqdm',
'astropy',
'numba>=0.34',
'pandas',
'rebound'
],
include_package_data = True,
zip_safe = False,
test_suite='nose.collector',
tests_require=['nose'],
ext_modules = [libppomodule],
) | gpl-3.0 |
stuart-knock/bokeh | bokeh/mpl_helpers.py | 11 | 5408 | "Helper functions for the mpl module."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from itertools import cycle, islice
from scipy import interpolate, signal
from .models import GlyphRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def convert_color(mplcolor):
"Converts mpl color formats to Bokeh color formats."
charmap = dict(b="blue", g="green", r="red", c="cyan", m="magenta",
y="yellow", k="black", w="white")
if mplcolor in charmap:
return charmap[mplcolor]
try:
colorfloat = float(mplcolor)
if 0 <= colorfloat <= 1.0:
# This is a grayscale value
return tuple([int(255 * colorfloat)] * 3)
except:
pass
if isinstance(mplcolor, tuple):
# These will be floats in the range 0..1
return int(255 * mplcolor[0]), int(255 * mplcolor[1]), int(255 * mplcolor[2])
return mplcolor
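# A few illustrative conversions:
#   convert_color('r') -> 'red'
#   convert_color(0.5) -> (127, 127, 127)          # grayscale fraction
#   convert_color((1.0, 0.5, 0.0)) -> (255, 127, 0)  # float RGB triple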
def convert_dashes(dash):
""" Converts a Matplotlib dash specification
bokeh.properties.DashPattern supports the matplotlib named dash styles,
but not the little shorthand characters. This function takes care of
mapping those.
"""
mpl_dash_map = {
"-": "solid",
"--": "dashed",
":": "dotted",
"-.": "dashdot",
}
# If the value doesn't exist in the map, then just return the value back.
return mpl_dash_map.get(dash, dash)
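# For example, convert_dashes('--') returns 'dashed', while values that are not
# shorthand characters (e.g. 'dotted') pass through unchanged.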
def delete_last_col(x):
"Just delete the last column of the array."
x = np.delete(x, (-1), axis=1)
return x
def get_props_cycled(col, prop, fx=lambda x: x):
""" We need to cycle the `get.property` list (where property can be colors,
    line_width, etc) as matplotlib does. We use itertools to do this
    cycling and slice manipulation.
    Parameters:
    col: matplotlib collection object
    prop: property we want to get from the matplotlib collection
    fx: function (optional) to transform the elements of the list obtained
        after the property call. Defaults to the identity function.
"""
n = len(col.get_paths())
t_prop = [fx(x) for x in prop]
sliced = islice(cycle(t_prop), None, n)
return list(sliced)
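# For instance, a collection with 5 paths and prop=['red', 'blue'] yields
# ['red', 'blue', 'red', 'blue', 'red'].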
def is_ax_end(r):
"Check if the 'name' (if it exists) in the Glyph's datasource is 'ax_end'"
if isinstance(r, GlyphRenderer):
try:
if r.data_source.data["name"] == "ax_end":
return True
except KeyError:
return False
else:
return False
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5):
"""
Mimic a hand-drawn line from (x, y) data
Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
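if __name__ == '__main__':
    # Minimal self-test sketch (not part of the Bokeh API): distort a sine curve
    # and report how many interpolated points come back. The inputs below are
    # arbitrary illustrative values.
    demo_x = np.linspace(0.0, 10.0, 50)
    demo_y = np.sin(demo_x)
    wobbly_x, wobbly_y = xkcd_line(demo_x, demo_y, mag=1.0)
    print("xkcd_line returned %d points" % len(wobbly_x))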
| bsd-3-clause |
Ziqi-Li/bknqgis | Shapely/shapely/examples/intersect.py | 24 | 2626 | # intersect.py
#
# Demonstrate how Shapely can be used to analyze and plot the intersection of
# a trajectory and regions in space.
from functools import partial
import random
import pylab
from shapely.geometry import LineString, Point
from shapely.ops import cascaded_union
# Build patches as in dissolved.py
r = partial(random.uniform, -20.0, 20.0)
points = [Point(r(), r()) for i in range(100)]
spots = [p.buffer(2.5) for p in points]
patches = cascaded_union(spots)
# Represent the following geolocation parameters
#
# initial position: -25, -25
# heading: 45.0
# speed: 50*sqrt(2)
#
# as a line
vector = LineString(((-25.0, -25.0), (25.0, 25.0)))
# Find intercepted and missed patches. List the former so we can count them
# later
intercepts = [patch for patch in patches.geoms if vector.intersects(patch)]
misses = (patch for patch in patches.geoms if not vector.intersects(patch))
# Plot the intersection
intersection = vector.intersection(patches)
assert intersection.geom_type in ['MultiLineString']
if __name__ == "__main__":
# Illustrate the results using matplotlib's pylab interface
pylab.figure(num=None, figsize=(4, 4), dpi=180)
# Plot the misses
for spot in misses:
x, y = spot.exterior.xy
pylab.fill(x, y, color='#cccccc', aa=True)
pylab.plot(x, y, color='#999999', aa=True, lw=1.0)
# Do the same for the holes of the patch
for hole in spot.interiors:
x, y = hole.xy
pylab.fill(x, y, color='#ffffff', aa=True)
pylab.plot(x, y, color='#999999', aa=True, lw=1.0)
# Plot the intercepts
for spot in intercepts:
x, y = spot.exterior.xy
pylab.fill(x, y, color='red', alpha=0.25, aa=True)
pylab.plot(x, y, color='red', alpha=0.5, aa=True, lw=1.0)
# Do the same for the holes of the patch
for hole in spot.interiors:
x, y = hole.xy
pylab.fill(x, y, color='#ffffff', aa=True)
pylab.plot(x, y, color='red', alpha=0.5, aa=True, lw=1.0)
# Draw the projected trajectory
pylab.arrow(-25, -25, 50, 50, color='#999999', aa=True,
head_width=1.0, head_length=1.0)
for segment in intersection.geoms:
x, y = segment.xy
pylab.plot(x, y, color='red', aa=True, lw=1.5)
# Write the number of patches and the total patch area to the figure
pylab.text(-28, 25,
"Patches: %d/%d (%d), total length: %.1f" \
% (len(intercepts), len(patches.geoms),
len(intersection.geoms), intersection.length))
pylab.savefig('intersect.png')
| gpl-2.0 |
GitYiheng/reinforcement_learning_test | test00_previous_files/pendulum_test/ode_test_110920170003.py | 2 | 3085 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
from matplotlib import animation
class CartPendulum:
def __init__(self, m1, m2, l, theta, c1, c2, dt):
self.m1 = m1 # cart mass
self.m2 = m2 # ball mass
self.l = l # pendulum length
self.theta = theta # pendulum angle
self.theta_dot = 0
self.theta_dot_dot = 0
# cart
self.x1 = 0
self.x1_dot = 0
self.x1_dot_dot = 0
# ball
self.x2 = l*np.sin(theta)
self.y2 = -l*np.cos(theta)
self.x2_dot = 0
self.y2_dot = 0
self.x2_dot_dot = 0
self.y2_dot_dot = 0
self.c1 = c1 # friction coefficient of cart
self.c2 = c2 # friction coefficient of ball
self.g = 9.81
self.F = 0 # horizontal force on cart
self.dt = dt # update time step
def update(self):
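        # Cart-pole equations of motion with viscous friction: c1 damps the cart
        # velocity, c2 damps the pendulum rotation, and F is the horizontal drive
        # force on the cart; the state is advanced with explicit Euler steps of dt.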
self.x1_dot_dot = (self.m2*self.l*self.theta_dot*self.theta_dot*np.sin(self.theta) + self.m2*self.g*np.sin(self.theta)*np.cos(self.theta) + self.F - self.c1*self.x1_dot + self.c2*self.theta_dot*np.cos(self.theta)) / (self.m1 + self.m2*np.sin(self.theta)*np.sin(self.theta))
self.theta_dot_dot = (-self.m2*self.l*self.theta_dot*self.theta_dot*np.sin(self.theta)*np.cos(self.theta) - (self.m1 + self.m2)*self.g*np.sin(self.theta) - self.F*np.cos(self.theta) + self.c1*self.x1_dot*np.cos(self.theta) - (1 + self.m1/self.m2)*self.c2*self.theta_dot) / (self.l*(self.m1 + self.m2*np.sin(self.theta)*np.sin(self.theta)))
self.x1_dot += self.x1_dot_dot*self.dt
self.theta_dot += self.theta_dot_dot*self.dt
self.x1 += self.x1_dot*self.dt
self.theta += self.theta_dot*self.dt
self.x2 = self.x1 + self.l*np.sin(self.theta)
self.y2 = -self.l*np.cos(self.theta)
return self.x1, self.x2, self.y2
def get_position(self):
print('Cart position: (', self.x1, ', 0)')
print('Bob position: (', self.x2, ', ', self.y2, ')')
return self.x1, self.x2, self.y2
fig = plt.figure()
plt.gca().set_aspect('equal')
ax = fig.add_subplot(111)
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
# create a cart pendulum system
system = CartPendulum(1, 1, 3, 0.4, 0.5, 0.5, 0.01)
# global drawing
link = lines.Line2D([system.x1, 0], [system.x2, system.y2], linewidth=2, color='xkcd:lightblue')
cart = patches.Rectangle((system.x1, 0), 1.0, 1.0, 0.0, edgecolor='xkcd:black', facecolor='xkcd:azure')
bob = patches.Circle((system.x2, system.y2), 0.5, edgecolor='xkcd:black', facecolor='xkcd:azure')
def init():
# permanent drawing
rail_x = np.linspace(-15, 15, 100)
rail_y = np.zeros(100)
ax.plot(rail_x, rail_y, 'xkcd:black', linewidth=2)
ax.add_line(link)
ax.add_patch(cart)
cart.set_width(1.0)
cart.set_height(1.0)
ax.add_patch(bob)
bob.set_radius(0.5)
return link, cart, bob
def update(frame_number):
system.update() # update cartpendulum
link.set_data([system.x1, system.x2], [0, system.y2]) # connect cart and bob
cart.set_xy([system.x1-0.5, -0.5]) # set cart centre
bob.center = ([system.x2, system.y2]) # set bob centre
return link, cart, bob
anim = animation.FuncAnimation(fig, update, init_func=init, frames = 100, interval=5, blit=True)
plt.show()
| mit |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 5 | 1902 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
| apache-2.0 |
superbobry/pymc3 | pymc3/glm/glm.py | 14 | 5720 | import numpy as np
from ..core import *
from ..distributions import *
from ..tuning.starting import find_MAP
import patsy
import theano
import pandas as pd
from collections import defaultdict
from pandas.tools.plotting import scatter_matrix
from . import families
def linear_component(formula, data, priors=None,
intercept_prior=None,
regressor_prior=None,
init_vals=None, family=None,
model=None):
"""Create linear model according to patsy specification.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : statsmodels.family
Link function to pass to statsmodels (init has to be True).
See `statsmodels.api.families`
Default: identity
Output
------
(y_est, coeffs) : Estimate for y, list of coefficients
Example
-------
# Logistic regression
y_est, coeffs = glm('male ~ height + weight',
htwt_data,
family=glm.families.Binomial(link=glm.family.logit))
y_data = Bernoulli('y', y_est, observed=data.male)
"""
if intercept_prior is None:
intercept_prior = Normal.dist(mu=0, tau=1.0E-12)
if regressor_prior is None:
regressor_prior = Normal.dist(mu=0, tau=1.0E-12)
if priors is None:
priors = defaultdict(None)
# Build patsy design matrix and get regressor names.
_, dmatrix = patsy.dmatrices(formula, data)
reg_names = dmatrix.design_info.column_names
if init_vals is None:
init_vals = {}
# Create individual coefficients
model = modelcontext(model)
coeffs = []
if reg_names[0] == 'Intercept':
prior = priors.get('Intercept', intercept_prior)
coeff = model.Var(reg_names.pop(0), prior)
if 'Intercept' in init_vals:
coeff.tag.test_value = init_vals['Intercept']
coeffs.append(coeff)
for reg_name in reg_names:
prior = priors.get(reg_name, regressor_prior)
coeff = model.Var(reg_name, prior)
if reg_name in init_vals:
coeff.tag.test_value = init_vals[reg_name]
coeffs.append(coeff)
y_est = theano.dot(np.asarray(dmatrix), theano.tensor.stack(*coeffs)).reshape((1, -1))
return y_est, coeffs
def glm(*args, **kwargs):
"""Create GLM after Patsy model specification string.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : Family object
Distribution of likelihood, see pymc3.glm.families
(init has to be True).
Output
------
vars : List of created random variables (y_est, coefficients etc)
Example
-------
# Logistic regression
vars = glm('male ~ height + weight',
data,
family=glm.families.Binomial(link=glm.families.logit))
"""
model = modelcontext(kwargs.get('model'))
family = kwargs.pop('family', families.Normal())
call_find_map = kwargs.pop('find_MAP', True)
formula = args[0]
data = args[1]
y_data = np.asarray(patsy.dmatrices(formula, data)[0]).T
y_est, coeffs = linear_component(*args, **kwargs)
family.create_likelihood(y_est, y_data)
return [y_est] + coeffs
def plot_posterior_predictive(trace, eval=None, lm=None, samples=30, **kwargs):
"""Plot posterior predictive of a linear model.
:Arguments:
trace : <array>
Array of posterior samples with columns
eval : <array>
Array over which to evaluate lm
lm : function <default: linear function>
Function mapping parameters at different points
to their respective outputs.
input: point, sample
output: estimated value
samples : int <default=30>
How many posterior samples to draw.
Additional keyword arguments are passed to pylab.plot().
"""
import matplotlib.pyplot as plt
if lm is None:
lm = lambda x, sample: sample['Intercept'] + sample['x'] * x
if eval is None:
eval = np.linspace(0, 1, 100)
# Set default plotting arguments
if 'lw' not in kwargs and 'linewidth' not in kwargs:
kwargs['lw'] = .2
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['c'] = 'k'
for rand_loc in np.random.randint(0, len(trace), samples):
rand_sample = trace[rand_loc]
plt.plot(eval, lm(eval, rand_sample), **kwargs)
# Make sure to not plot label multiple times
kwargs.pop('label', None)
plt.title('Posterior predictive')
| apache-2.0 |
NelisVerhoef/scikit-learn | benchmarks/bench_rcv1_logreg_convergence.py | 149 | 7173 | # Authors: Tom Dupre la Tour <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
import gc
import time
from sklearn.externals.joblib import Memory
from sklearn.linear_model import (LogisticRegression, SGDClassifier)
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir='.', verbose=0)
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
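    # Objective tracked for convergence: mean logistic loss plus an L2 penalty
    # scaled to scikit-learn's C parameterisation,
    #     mean(log(1 + exp(-y * (X.dot(w) + intercept)))) + w.dot(w) / (2 * C * n_samples)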
n_samples = myX.shape[0]
w = w.ravel()
p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept))))
print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples))
p += w.dot(w) / 2. / C / n_samples
return p
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
clf = clf_type(**clf_params)
try:
clf.set_params(max_iter=n_iter, random_state=42)
except:
clf.set_params(n_iter=n_iter, random_state=42)
st = time.time()
clf.fit(X, y)
end = time.time()
try:
C = 1.0 / clf.alpha / n_samples
except:
C = clf.C
try:
intercept = clf.intercept_
except:
intercept = 0.
train_loss = get_loss(clf.coef_, intercept, X, y, C)
train_score = clf.score(X, y)
test_score = clf.score(X_test, y_test)
duration = end - st
return train_loss, train_score, test_score, duration
def bench(clfs):
for (name, clf, iter_range, train_losses, train_scores,
test_scores, durations) in clfs:
print("training %s" % name)
clf_type = type(clf)
clf_params = clf.get_params()
for n_iter in iter_range:
gc.collect()
train_loss, train_score, test_score, duration = bench_one(
name, clf_type, clf_params, n_iter)
train_losses.append(train_loss)
train_scores.append(train_score)
test_scores.append(test_score)
durations.append(duration)
print("classifier: %s" % name)
print("train_loss: %.8f" % train_loss)
print("train_score: %.8f" % train_score)
print("test_score: %.8f" % test_score)
print("time for fit: %.8f seconds" % duration)
print("")
print("")
return clfs
def plot_train_losses(clfs):
plt.figure()
for (name, _, _, train_losses, _, _, durations) in clfs:
plt.plot(durations, train_losses, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train loss")
def plot_train_scores(clfs):
plt.figure()
for (name, _, _, _, train_scores, _, durations) in clfs:
plt.plot(durations, train_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("train score")
plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
plt.figure()
for (name, _, _, _, _, test_scores, durations) in clfs:
plt.plot(durations, test_scores, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("test score")
plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
plt.figure()
pobj_final = []
for (name, _, _, train_losses, _, _, durations) in clfs:
pobj_final.append(train_losses[-1])
indices = np.argsort(pobj_final)
pobj_best = pobj_final[indices[0]]
for (name, _, _, train_losses, _, _, durations) in clfs:
log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10)
plt.plot(durations, log_pobj, '-o', label=name)
plt.legend(loc=0)
plt.xlabel("seconds")
plt.ylabel("log(best - train_loss)")
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index('CCAT')
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
("LR-liblinear",
LogisticRegression(C=C, tol=tol,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_iter_range, [], [], [], []),
("LR-liblinear-dual",
LogisticRegression(C=C, tol=tol, dual=True,
solver="liblinear", fit_intercept=fit_intercept,
intercept_scaling=1),
liblinear_dual_iter_range, [], [], [], []),
("LR-SAG",
LogisticRegression(C=C, tol=tol,
solver="sag", fit_intercept=fit_intercept),
sag_iter_range, [], [], [], []),
("LR-newton-cg",
LogisticRegression(C=C, tol=tol, solver="newton-cg",
fit_intercept=fit_intercept),
newton_iter_range, [], [], [], []),
("LR-lbfgs",
LogisticRegression(C=C, tol=tol,
solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range, [], [], [], []),
("SGD",
SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log',
fit_intercept=fit_intercept, verbose=0),
sgd_iter_range, [], [], [], [])]
if lightning_clf is not None and not fit_intercept:
alpha = 1. / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log",
fit_intercept)
clfs.append(
("Lightning-SVRG",
lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
clfs.append(
("Lightning-SAG",
lightning_clf.SAGClassifier(alpha=alpha, eta=step_size,
tol=tol, loss="log"),
sag_iter_range, [], [], [], []))
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| bsd-3-clause |
fredhusser/scikit-learn | sklearn/linear_model/randomized_l1.py | 33 | 23358 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
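    # One stability-selection draw: keep only the rows picked by `mask`, shrink a
    # random subset of columns through the (1 - weights) rescaling below, run the
    # lasso/Lars path, and record which coefficients are active at the requested
    # alpha value(s).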
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which corresponds to ``scaling`` here.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
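# Usage sketch (illustrative only; assumes ``sklearn.datasets.make_regression``
# is importable in this environment):
#
#   from sklearn.datasets import make_regression
#   X, y = make_regression(n_samples=100, n_features=20, n_informative=5,
#                          random_state=0)
#   rlasso = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
#   rlasso.fit(X, y)
#   stable = rlasso.get_support(indices=True)  # indices of frequently selected features
#   X_reduced = rlasso.transform(X)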
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
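# Usage sketch (illustrative only; assumes ``sklearn.datasets.make_classification``
# is importable in this environment):
#
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_samples=200, n_features=20, n_informative=4,
#                              random_state=0)
#   rlog = RandomizedLogisticRegression(C=1.0, n_resampling=50, random_state=0)
#   X_selected = rlog.fit(X, y).transform(X)  # keeps features above selection_threshold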
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
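# Plotting sketch (illustrative only; assumes matplotlib is available):
#
#   alphas_grid, scores_path = lasso_stability_path(X, y, random_state=42)
#   import matplotlib.pyplot as plt
#   plt.plot(alphas_grid, scores_path.T)
#   plt.xlabel('alpha / alpha_max')
#   plt.ylabel('stability score')
#   plt.show()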
| bsd-3-clause |
sagarjauhari/BCIpy | slicer.py | 1 | 7896 | # /usr/bin/env python
# Copyright 2013, 2014 Justis Grant Peters and Sagar Jauhari
# This file is part of BCIpy.
#
# BCIpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BCIpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BCIpy. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd, numpy as np
import sys, re
import rolling_windows
import filters
import pytz
from os.path import join
from StringIO import StringIO
ALL_RAW_URL='data'
try:
from dev_settings import *
except ImportError:
pass
class Slicer(object):
"""Loads data and metadata, extracts important features, and provides methods to retrieve subsets"""
def __init__(self, taskfile=join(ALL_RAW_URL, 'task.xls')):
self.load_tasks_from_tsv(taskfile)
self.series = {}
def get_tasks(self):
return self.tasks
def load_tasks_from_tsv(self, taskfile):
"""Reads task data from tab delimited file"""
# Find start_time and end_time column names
st_regex=re.compile("[sS]tart_[tT]ime")
et_regex=re.compile("[eE]nd_[tT]ime")
with open(taskfile, 'r') as tf:
head = ''.join(tf.readlines(1))
cols = head.split('\t')
start_time = [m.group(0) for l in cols for m in [st_regex.search(l)] if m][0]
end_time = [m.group(0) for l in cols for m in [et_regex.search(l)] if m][0]
t = pd.read_table(taskfile, parse_dates=[start_time, end_time], index_col=False)
t['word_count'] = t.stim.apply(lambda x: len(x.split()))
t['is_passage'] = t.word_count.apply(lambda x: x > 1)
self.tasks = t
def load_series_from_csv(self, seriesname, csvfilelist):
"""Reads a single series from a CSV file and merges it with the current data"""
if csvfilelist is None or len(csvfilelist) == 0:
raise Exception("No files to process!")
self.series[seriesname] = pd.concat([
pd.read_csv(filename, parse_dates=[0], index_col=0,
squeeze=True).tz_localize(pytz.UTC).tz_convert(pytz.timezone('US/Eastern'))
for filename in csvfilelist
]).sort_index()
def load_series_from_pickle(self, seriesname, picklefile):
"""For quick restore from previous Slicer state"""
self.series[seriesname] = pd.read_pickle(picklefile)
def get_passage_tasks_by_difficulty(self, difficulty, features=[]):
"""Gets data for only the tasks which have the specified difficulty, along with any features specified in the 'features' arg"""
t = self.tasks
taskids = t[t.difficulty==difficulty][t.is_passage].index
return [self.get_by_task_id(taskid, features=features) for taskid in taskids]
def get_by_task_id(self, taskid, features=[]):
"""Get just one task, by taskid, with the features specified in the 'features' arg"""
task = self.tasks.loc[taskid]
st, et = task['start_time':'end_time']
st = st.tz_localize(pytz.timezone('US/Eastern'))
et = et.tz_localize(pytz.timezone('US/Eastern'))
task = task.to_dict()
task.update({f:self.series[f][st:et] for f in features})
return task
def extract_first_n_raw(self, n=10):
"""
Extract the first 'n' samples for each task's raw data
and save in self.tasks
"""
X = [
self.get_n_samples_by_taskid(taskid, 'raw', n)
for taskid in self.tasks.index
]
self.tasks = self.tasks.combine_first(pd.DataFrame(X, index=self.tasks.index))
def extract_first_n_median(self, n=10):
"""Extracts just the first n samples from the rolling median,
primarily to normalize sample vectors to the same length.
To the existing dataframe, adds additional 'n' columns which are the 1st
'n' values of the rolling median for each task.
"""
X = [
self.get_n_samples_by_taskid(taskid, 'raw_rolling_median_128', n)
for taskid in self.tasks.index
]
self.tasks = self.tasks.combine_first(pd.DataFrame(X, index=self.tasks.index))
def get_n_samples_by_taskid(self, taskid, feature, n=10):
"""For the specified taskid, return just the first n samples"""
task = self.tasks.loc[taskid]
st, et = task['start_time':'end_time']
st = st.tz_localize(pytz.timezone('US/Eastern'))
et = et.tz_localize(pytz.timezone('US/Eastern'))
ret = np.array([0]*n)
vals = self.series[feature][st:et][:n] # get up to n values
ret[:len(vals)] = vals[:] # overwrite 0s where vals exist
return ret
def get_time_duration_by_taskid(self, taskid):
"""
Returns task duration in seconds
"""
task = self.tasks.loc[taskid]
st, et = task['start_time':'end_time']
st = st.tz_localize(pytz.timezone('US/Eastern'))
et = et.tz_localize(pytz.timezone('US/Eastern'))
return (et - st).microseconds/1000000.0
def print_series_info(self):
"""Prints info about all series available, primarily for debugging purposes"""
print ["%s: %s" % (k, type(s)) for k,s in self.series.iteritems()]
def extract_rolling_median(self, seriesname='raw', window_size=128):
"""Extracts a rolling median for the specified series"""
print "Extracting rolling median: name=%s window_size=%d" \
% (seriesname, window_size)
new_feature_name = seriesname+'_rolling_median_'+str(window_size)
self.series[new_feature_name]=rolling_windows.downsampled_rolling_median(
self.series[seriesname],
window_size=window_size
)
def extract_rolling_PSD(self, seriesname='raw', window_size=512):
"""Extracts power spectral density (PSD) for the specified series"""
new_feature_name = seriesname+'_rolling_PSD_'+str(window_size)
self.series[new_feature_name]=rolling_windows.rolling_power_ratio(
self.series[seriesname],
window_size=window_size
)
def extract_filtered_signal(self, seriesname='raw', fs=512.0, lowcut=0.1, highcut=20.0):
"""Applies a Butterworth bandpass filter to the specified series"""
self.series[seriesname+'_butter_filtered'] = filters.butter_bandpass_filter(
self.series[seriesname],
lowcut=lowcut,
highcut=highcut,
fs=fs,
order=4)
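# Interactive usage sketch (illustrative; file paths are placeholders):
#
#   s = Slicer(taskfile='data/task.xls')
#   s.load_series_from_csv('raw', ['data/raw_part1.csv', 'data/raw_part2.csv'])
#   s.extract_rolling_median('raw', window_size=128)
#   easy_tasks = s.get_passage_tasks_by_difficulty(1, features=['raw_rolling_median_128'])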
# If run from the command line, do some basic tests and output some debugging info
if __name__ == '__main__':
print 'instantiating task slicer'
s = Slicer()
if len(sys.argv) > 2:
print 'loading raw from list of csvfiles'
s.load_series_from_csv('raw', sys.argv[1:])
else:
print 'loading raw from pickle'
s.load_series_from_pickle('raw', sys.argv[1])
print 'extracting filtered signal'
s.extract_filtered_signal()
print 'extracting rolling median'
s.extract_rolling_median()
print 'extracting rolling PSD'
s.extract_rolling_PSD()
print 'fetching task 1, with features'
print s.get_by_task_id(1, features=['raw','raw_rolling_PSD_512', 'raw_rolling_median_128'])
print s.get_passage_tasks_by_difficulty(2, features=['raw','raw_rolling_PSD_512', 'raw_rolling_median_128'])
print [(d['SUBJECT'], len(d['raw'])) for d in s.get_passage_tasks_by_difficulty(1, features=['raw'])]
s.print_series_info()
| gpl-3.0 |
winklerand/pandas | pandas/tests/reshape/test_reshape.py | 1 | 19553 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas import get_dummies
import pandas.util.testing as tm
from pandas.compat import u
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if not sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
tm.assert_series_equal(result.get_dtype_counts(),
Series({dtype.name: 8}))
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
dtype_name = self.effective_dtype(dtype).name
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
if sparse:
pytest.xfail(reason='nan in index is problematic (GH 16894)')
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c']
expected[cols] = expected[cols].astype(dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'from_A_a', 'from_A_b',
'from_B_b', 'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]}, dtype=np.uint8)
expected[['C']] = df[['C']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1],
'C': [1, 2, 3]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': [1, 0, 1, 0],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_b': [1, 1, 0, 0],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]}).sort_index(axis=1)
e_dtype = self.effective_dtype(dtype)
columns = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
expected[columns] = expected[columns].astype(e_dtype)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1],
'cat_x': [1, 0, 0],
'cat_y': [0, 1, 1]}).sort_index(axis=1)
columns = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
effective_dtype = self.effective_dtype(dtype)
expected[columns] = expected[columns].astype(effective_dtype)
expected.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case where the categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
# Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/step_demo.py | 1 | 1283 | """
=========
Step Demo
=========
Example step plots.
"""
import numpy as np
from numpy import ma
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
x = np.arange(1, 7, 0.4)
y0 = np.sin(x)
y = y0.copy() + 2.5
plt.step(x, y, label='pre (default)')
y -= 0.5
plt.step(x, y, where='mid', label='mid')
y -= 0.5
plt.step(x, y, where='post', label='post')
y = ma.masked_where((y0 > -0.15) & (y0 < 0.15), y - 0.5)
plt.step(x, y, label='masked (pre)')
plt.legend()
plt.xlim(0, 7)
plt.ylim(-0.5, 4)
pltshow(plt)
| mit |
cmshobe/landlab | tests/components/normal_fault/test_normal_fault.py | 3 | 8582 | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab import HexModelGrid, RasterModelGrid
from landlab.components import NormalFault
def test_dx_equals_zero():
"""Test a vertical fault trace."""
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")
param_dict = {
"faulted_surface": "topographic__elevation",
"fault_dip_angle": 90.0,
"fault_throw_rate_through_time": {"time": [0, 9, 10], "rate": [0, 0, 0.05]},
"fault_trace": {"y1": 0, "x1": 30, "y2": 30, "x2": 30},
"include_boundaries": True,
}
nf = NormalFault(grid, **param_dict)
out = np.array(
[
[True, True, True, False, False, False],
[True, True, True, False, False, False],
[True, True, True, False, False, False],
[True, True, True, False, False, False],
[True, True, True, False, False, False],
[True, True, True, False, False, False],
],
dtype=bool,
)
assert_array_equal(nf.faulted_nodes.reshape(grid.shape), out)
def test_anti_aximuth_greq_2pi():
"""Test anti azimuth over 2*pi."""
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")
param_dict = {
"faulted_surface": "topographic__elevation",
"fault_dip_angle": 90.0,
"fault_throw_rate_through_time": {"time": [0, 9, 10], "rate": [0, 0, 0.05]},
"fault_trace": {"y1": 30.0, "x1": 30.0, "y2": 20.0, "x2": 0.0},
"include_boundaries": True,
}
nf = NormalFault(grid, **param_dict)
assert nf._fault_anti_azimuth > 2.0 * np.pi
out = np.array(
[
[True, True, True, True, True, True],
[True, True, True, True, True, True],
[True, True, True, True, True, True],
[False, False, False, False, True, True],
[False, False, False, False, False, False],
[False, False, False, False, False, False],
],
dtype=bool,
)
assert_array_equal(nf.faulted_nodes.reshape(grid.shape), out)
def test_non_raster():
"""Test a hex model grid."""
grid = HexModelGrid((7, 3), spacing=10, xy_of_lower_left=(-15.0, 0.0))
grid.add_zeros("topographic__elevation", at="node")
param_dict = {
"faulted_surface": "topographic__elevation",
"fault_dip_angle": 90.0,
"fault_throw_rate_through_time": {"time": [0, 9, 10], "rate": [0, 0, 0.05]},
"fault_trace": {"y1": 30.0, "x1": 30.0, "y2": 20.0, "x2": 0.0},
"include_boundaries": True,
}
nf = NormalFault(grid, **param_dict)
# plotting, to test this. it works!
# import matplotlib.pyplot as plt
# plt.figure()
# imshow_grid(grid, nf.faulted_nodes, color_for_background='y')
# plt.plot(grid.x_of_node, grid.y_of_node, 'c.')
# plt.plot([param_dict['fault_trace']['x1'], param_dict['fault_trace']['x2']],
# [param_dict['fault_trace']['y1'], param_dict['fault_trace']['y2']], 'r')
# plt.show()
out = np.array(
[
True,
True,
True,
True,
True,
True,
True,
False,
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
],
dtype=bool,
)
assert_array_equal(nf.faulted_nodes, out)
def test_dip_geq_90():
"""Test dip angles of >90 degrees."""
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")
with pytest.raises(ValueError):
NormalFault(grid, fault_dip_angle=90.001)
def test_uplifting_multiple_fields():
"""Test uplifting multiple fields with NormalFault."""
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")
zbr = grid.add_zeros("bedrock__elevation", at="node")
zbr -= 1.0
param_dict = {
"faulted_surface": ["topographic__elevation", "bedrock__elevation"],
"fault_dip_angle": 90.0,
"fault_throw_rate_through_time": {"time": [0, 9, 10], "rate": [1, 1, 1]},
"fault_trace": {"y1": 30.0, "x1": 30.0, "y2": 20.0, "x2": 0.0},
"include_boundaries": True,
}
nf = NormalFault(grid, **param_dict)
nf.run_one_step(dt=10)
elev = np.array(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
10.0,
10.0,
10.0,
10.0,
0.0,
0.0,
10.0,
10.0,
10.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
bedrock = np.array(
[
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
9.0,
9.0,
9.0,
9.0,
-1.0,
-1.0,
9.0,
9.0,
9.0,
9.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
9.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
-1.0,
]
)
assert_array_equal(grid.at_node["topographic__elevation"], elev)
assert_array_equal(grid.at_node["bedrock__elevation"], bedrock)
def test_uplifting_a_not_yet_created_field():
"""Test uplifting a field that does not exist with NormalFault."""
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")
zbr = grid.add_zeros("bedrock__elevation", at="node")
zbr -= 1.0
param_dict = {
"faulted_surface": [
"topographic__elevation",
"bedrock__elevation",
"spam",
"eggs",
],
"fault_dip_angle": 90.0,
"fault_throw_rate_through_time": {"time": [0, 9, 10], "rate": [1, 1, 1]},
"fault_trace": {"y1": 30.0, "x1": 30.0, "y2": 20.0, "x2": 0.0},
"include_boundaries": True,
}
assert "spam" not in grid.at_node
assert "eggs" not in grid.at_node
# instantiating NormalFault will not create spam or eggs
nf = NormalFault(grid, **param_dict)
assert "spam" not in grid.at_node
assert "eggs" not in grid.at_node
assert "spam" in nf._not_yet_instantiated
assert "eggs" in nf._not_yet_instantiated
# running NormalFault will not create spam or eggs
nf.run_one_step(dt=10)
assert "spam" not in grid.at_node
assert "eggs" not in grid.at_node
assert "spam" in nf._not_yet_instantiated
assert "eggs" in nf._not_yet_instantiated
# running NormalFault after adding spam and eggs will result in NormalFault
# modifying these fields.
grid.add_zeros("eggs", at="node")
grid.add_zeros("spam", at="node")
nf.run_one_step(dt=10)
assert "spam" in grid.at_node
assert "eggs" in grid.at_node
vals = np.array(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
10.0,
10.0,
10.0,
10.0,
0.0,
0.0,
10.0,
10.0,
10.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
assert_array_equal(grid.at_node["eggs"], vals)
assert_array_equal(grid.at_node["spam"], vals)
| mit |
asurunis/CrisisMappingToolkit | bin/test_active_contour.py | 1 | 8927 | # -----------------------------------------------------------------------------
# Copyright * 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import logging
logging.basicConfig(level=logging.ERROR)
try:
import cmt.ee_authenticate
except:
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import cmt.ee_authenticate
cmt.ee_authenticate.initialize()
import time
import cmt.domain
from cmt.radar.active_contour import *
import sys
import PIL
from PIL import ImageQt
import numpy
from PyQt4 import QtGui, QtCore
app = QtGui.QApplication(sys.argv)
#from PIL import Image, ImageChops
#import matplotlib.pyplot as plt
#plt.imread('/home/smcmich1/fileTest.tif')
#raise Exception('DEBUG')
THIS_FILE_FOLDER = os.path.dirname(os.path.realpath(__file__))
# DOMAIN SELECTION IS HERE!
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/uavsar/mississippi.xml')
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/sentinel1/malawi_2015_1.xml')
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/sentinel1/rome_small.xml')
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/skybox/malawi_2015.xml')
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/skybox/gloucester_2014_10.xml')
#domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/skybox/sumatra_2014_10.xml')
domain = cmt.domain.Domain(os.path.join(THIS_FILE_FOLDER, '..') + '/config/domains/skybox/new_bedford_2014_10.xml')
#result = active_contour(domain) # Run this to compute the final results!
def active_contour_step(local_image, snake, step):
'''Perform another step of the active contour algorithm'''
if snake.done:
return True
t = time.time()
if step % 10 == 0: # Do extra work every tenth iteration
snake.respace_nodes()
snake.shift_nodes() # shift before fixing geometry since reversal of orientation possible
snake.fix_geometry()
else:
snake.shift_nodes()
print time.time() - t
return False
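# Headless driver sketch (illustrative): the Qt window below advances the snake
# one step per key press, but the same loop can run without a GUI once
# initialize_active_contour() has produced a local image and snake (see
# ActiveContourWindow.__init__ for how those are built):
#
#   step = 1
#   while not active_contour_step(local_image, snake, step):
#       step += 1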
class ActiveContourWindow(QtGui.QWidget):
'''Dedicated class for drawing the progress of the active contour algorithm'''
def __init__(self, domain):
super(ActiveContourWindow, self).__init__()
self.setGeometry(300, 300, 650, 650)
self.setWindowTitle('Active Contour')
self.domain = domain
#
## Fetch image and compute statistics
#sensor = domain.get_radar()
#detect_channel = domain.algorithm_params['water_detect_radar_channel']
#ee_image = sensor.image.select([detect_channel]).toUint16()
#if sensor.log_scale:
# statisics_image = ee_image.log10()
#else:
# statisics_image = ee_image
#(band_names, band_statistics) = compute_band_statistics(statisics_image, domain.ground_truth, domain.bounds)
#
#(self.local_image, self.snake) = initialize_active_contour(domain, ee_image, band_statistics, sensor.log_scale)
#
## Retrieve the local image bands and merge them into a fake RGB image
#channels = [self.local_image.get_image(detect_channel), self.local_image.get_image(detect_channel), self.local_image.get_image(detect_channel)]
#channel_images = [PIL.Image.fromarray(numpy.uint8(c*255/1200)) for c in channels] # Convert from 16 bit to 8 bit
#self.display_image = PIL.Image.merge('RGB', channel_images)
#self.step = 1
#self.show()
#
## Initialize the contour with the selected sensor band
#sensor_name = 'uavsar'
#sensor = getattr(domain, sensor_name)
#ee_image = sensor.image.select(['hh'])
#
## TODO: Make sure the name and statistics line up inside the class!
## Compute statistics for each band -> Log10 needs to be applied here!
#(band_names, band_statistics) = compute_band_statistics(statisics_image, domain.ground_truth, domain.bounds)
#
#(self.local_image, self.snake) = initialize_active_contour(domain, ee_image, band_statistics, sensor.log_scale)
#
## Retrieve the local image bands and merge them into a fake RGB image
##channels = [self.local_image.get_image('hh'), self.local_image.get_image('hv'), self.local_image.get_image('vv')]
#channels = [self.local_image.get_image('hh'), self.local_image.get_image('hh'), self.local_image.get_image('hh')]
#channel_images = [PIL.Image.fromarray(numpy.uint8(c >> 8)) for c in channels] # Convert from 16 bit to 8 bit
#self.display_image = PIL.Image.merge('RGB', channel_images)
#self.step = 1
#self.show()
SKYBOX_SCALE = 1200 / 256
train_domain = domain.training_domain # For skybox data there is probably no earlier image to train off of
try: # The Skybox data can be in one of two names
sensor = domain.skybox
trainSensor = train_domain.skybox
except:
sensor = domain.skybox_nir
trainSensor = train_domain.skybox_nir
ee_image = sensor.image.toUint16() # For Skybox, these are almost certainly the same image.
ee_image_train = trainSensor.image.toUint16()
if train_domain.training_features: # Train using features
(band_names, band_statistics) = compute_band_statistics_features(ee_image_train, train_domain.training_features)
else: # Train using training truth
(band_names, band_statistics) = compute_band_statistics(ee_image_train, train_domain.ground_truth, train_domain.bounds)
(self.local_image, self.snake) = initialize_active_contour(domain, ee_image, band_statistics, False)
# Retrieve the local image bands and merge them into a fake RGB image
channels = [self.local_image.get_image('Red'), self.local_image.get_image('Green'), self.local_image.get_image('Blue')]
channel_images = [PIL.Image.fromarray(numpy.uint8(c / SKYBOX_SCALE)) for c in channels] # Convert from Skybox range to 8 bit
self.display_image = PIL.Image.merge('RGB', channel_images)
self.step = 1
self.show()
def paintEvent(self, event):
imageqt = ImageQt.ImageQt(self.display_image)
p = QtGui.QPainter()
p.begin(self)
p.setRenderHint(QtGui.QPainter.Antialiasing, True);
scale = self.height() / float(imageqt.height() + 10)
p.scale(scale, scale)
p.translate((self.width() / 2 / scale - imageqt.width() / 2),
(self.height() / 2 / scale - imageqt.height() / 2))
p.fillRect(0, 0, imageqt.width(), imageqt.height(), QtGui.QColor(0, 0, 0))
p.drawImage(0, 0, imageqt)
NODE_RADIUS = 4
# draw nodes
for loop in self.snake.loops:
for i in range(len(loop.nodes)):
p.setPen(QtGui.QColor(255, 0, 0))
p.setBrush(QtGui.QBrush(QtGui.QColor(255, 0, 0)))
p.drawEllipse(loop.nodes[i][1] - NODE_RADIUS / 2.0,
loop.nodes[i][0] - NODE_RADIUS / 2.0, NODE_RADIUS, NODE_RADIUS)
# draw lines between nodes
for loop in self.snake.loops:
for i in range(len(loop.nodes)):
if len(loop.nodes) > 1:
n = i+1
if n == len(loop.nodes):
n = 0
p.setPen(QtGui.QColor(0, 255, 0))
p.drawLine(loop.nodes[i][1], loop.nodes[i][0], loop.nodes[n][1], loop.nodes[n][0])
p.end()
def keyPressEvent(self, event):
'''Update the algorithm on space, quit on "q"'''
if event.key() == QtCore.Qt.Key_Space:
active_contour_step(self.local_image, self.snake, self.step)
self.repaint()
self.step += 1
if event.key() == QtCore.Qt.Key_Q:
QtGui.QApplication.quit()
ex = ActiveContourWindow(domain)
sys.exit(app.exec_())
| apache-2.0 |
rbaghdadi/ISIR | utils/speedup_model/src/old/main.py | 1 | 2317 | from data_loader import *
from model import *
from model_bn import *
from fastai.basic_data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import fastai as fai
from fire import Fire
import matplotlib.pyplot as plt
import pickle
import seaborn as sns
def train_dev_split(dataset, batch_size, num_workers, maxsize, split_factor=10, seed=42):
indices = np.random.RandomState(seed=seed).permutation(maxsize)
val_indices, train_indices = indices[:maxsize//split_factor], indices[maxsize//split_factor:]
train_dl = DataLoader(DatasetFromHdf5(dataset, maxsize=len(train_indices)),
batch_size=batch_size,
sampler=SubsetRandomSampler(train_indices),
num_workers=num_workers)
val_dl = DataLoader(DatasetFromHdf5(dataset, maxsize=len(val_indices)),
batch_size=batch_size,
sampler=SubsetRandomSampler(val_indices),
num_workers=num_workers)
return train_dl, val_dl
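# Usage sketch (illustrative; the dataset path is a placeholder):
#
#   train_dl, val_dl = train_dev_split('data/speedup_dataset.h5', batch_size=2048,
#                                      num_workers=8, maxsize=50000)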
def main(batch_size=2048, num_epochs=400,
num_workers=8, algorithm='adam',
maxsize=50000, new=True, dataset='data/speedup_dataset.h5',
batch_norm=False, filename='data/results.pkl',lr=0.001):
train_dl, val_dl = train_dev_split(dataset, batch_size, num_workers, maxsize)
input_size = train_dl.dataset.X.shape[1]
output_size = train_dl.dataset.Y.shape[1]
model_name = "model " + algorithm
if batch_norm:
model_name += " batch_norm"
model = load_model(input_size, output_size, model_name=model_name, filename=filename, batch_norm=batch_norm)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=lr)
if algorithm != 'adam':
optimizer = optim.SGD(model.parameters(), lr=lr)
#scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
dl = {'train':train_dl, 'val': val_dl}
model, losses = train_model(model, criterion, optimizer, dl, num_epochs)
#pickle results
save_results(model_name, model, losses)
#plot_results(losses)
if __name__ == '__main__':
Fire()
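# Example CLI invocation through python-fire (illustrative; flag values are placeholders):
#
#   python main.py main --batch_size=1024 --num_epochs=100 --algorithm=sgd --batch_norm=True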
| mit |
mrustl/flopy | examples/scripts/flopy_swi2_ex4.py | 1 | 12177 | from __future__ import print_function
import os
import platform
import sys
import math
import numpy as np
import flopy.modflow as mf
import flopy.utils as fu
import matplotlib.pyplot as plt
# --modify default matplotlib settings
updates = {'font.family':['Univers 57 Condensed', 'Arial'],
'mathtext.default':'regular',
'pdf.compression':0,
'pdf.fonttype':42,
'legend.fontsize':7,
'axes.labelsize':8,
'xtick.labelsize':7,
'ytick.labelsize':7}
plt.rcParams.update(updates)
def LegBar(ax, x0, y0, t0, dx, dy, dt, cc):
for c in cc:
ax.plot([x0, x0 + dx], [y0, y0], color=c, linewidth=4)
ctxt = '{0:=3d} years'.format(t0)
ax.text(x0 + 2. * dx, y0 + dy / 2., ctxt, size=5)
y0 += dy
t0 += dt
return
cleanFiles = False
skipRuns = False
fext = 'png'
narg = len(sys.argv)
iarg = 0
if narg > 1:
while iarg < narg - 1:
iarg += 1
basearg = sys.argv[iarg].lower()
if basearg == '-clean':
cleanFiles = True
elif basearg == '-skipruns':
skipRuns = True
elif basearg == '-pdf':
fext = 'pdf'
if cleanFiles:
print('cleaning all files')
print('excluding *.py files')
files = os.listdir('.')
for f in files:
if '.py' != os.path.splitext(f)[1].lower():
print(' removing...{}'.format(os.path.basename(f)))
os.remove(f)
sys.exit(1)
#Set path and name of MODFLOW exe
exe_name = 'mf2005'
if platform.system() == 'Windows':
exe_name = 'mf2005.exe'
workspace = '.'
ncol = 61
nrow = 61
nlay = 2
nper = 3
perlen = [365.25 * 200., 365.25 * 12., 365.25 * 18.]
nstp = [1000, 120, 180]
save_head = [200, 60, 60]
steady=True
# dis data
delr, delc = 50.0, 50.0
botm = np.array([-10., -30., -50.])
# oc data
savewords = []
for i in range(0, nper):
icnt = 0
for j in range(0, nstp[i]):
icnt += 1
savebudget = False
savehead = False
if icnt == save_head[i]:
savebudget = True
savehead = True
icnt = 0
if (j + 1) == nstp[i]:
savebudget = True
if savebudget == True or savehead == True:
twords = [i + 1, j + 1]
if savebudget == True:
twords.append('pbudget')
if savehead == True:
twords.append('head')
savewords.append(twords)
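# Each savewords entry has the form [stress_period, time_step, 'pbudget'(, 'head')],
# e.g. [1, 200, 'pbudget', 'head'] saves both the budget and the heads at time
# step 200 of stress period 1; the list is handed to ModflowOc88 below.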
modelname = 'swiex4_2d_2layer'
# bas data
# ibound - active except for the corners
ibound = np.ones((nlay, nrow, ncol), dtype= np.int)
ibound[:, 0, 0] = 0
ibound[:, 0, -1] = 0
ibound[:, -1, 0] = 0
ibound[:, -1, -1] = 0
# initial head data
ihead = np.zeros((nlay, nrow, ncol), dtype=np.float)
# lpf data
laytyp=0
hk=10.
vka=0.2
# boundary condition data
# ghb data
colcell, rowcell = np.meshgrid(np.arange(0, ncol), np.arange(0, nrow))
index = np.zeros((nrow, ncol), dtype=np.int)
index[:, :10] = 1
index[:, -10:] = 1
index[:10, :] = 1
index[-10:, :] = 1
nghb = np.sum(index)
lrchc = np.zeros((nghb, 5))
lrchc[:, 0] = 0
lrchc[:, 1] = rowcell[index == 1]
lrchc[:, 2] = colcell[index == 1]
lrchc[:, 3] = 0.
lrchc[:, 4] = 50.0 * 50.0 / 40.0
# create ghb dictionary
ghb_data = {0:lrchc}
# recharge data
rch = np.zeros((nrow, ncol), dtype=np.float)
rch[index == 0] = 0.0004
# create recharge dictionary
rch_data = {0: rch}
# well data
nwells = 2
lrcq = np.zeros((nwells, 4))
lrcq[0, :] = np.array((0, 30, 35, 0))
lrcq[1, :] = np.array([1, 30, 35, 0])
lrcqw = lrcq.copy()
lrcqw[0, 3] = -250
lrcqsw = lrcq.copy()
lrcqsw[0, 3] = -250.
lrcqsw[1, 3] = -25.
# create well dictionary
base_well_data = {0:lrcq, 1:lrcqw}
swwells_well_data = {0:lrcq, 1:lrcqw, 2:lrcqsw}
# swi2 data
adaptive = False
nadptmx = 10
nadptmn = 1
nu = [0, 0.025]
numult = 5.0
toeslope = nu[1] / numult #0.005
tipslope = nu[1] / numult #0.005
z1 = -10.0 * np.ones((nrow, ncol))
z1[index == 0] = -11.0
z = np.array([[z1, z1]])
iso = np.zeros((nlay, nrow, ncol), np.int)
iso[0, :, :][index == 0] = 1
iso[0, :, :][index == 1] = -2
iso[1, 30, 35] = 2
ssz=0.2
# swi2 observations
obsnam = ['layer1_', 'layer2_']
obslrc=[[0, 30, 35], [1, 30, 35]]
nobs = len(obsnam)
iswiobs = 1051
modelname = 'swiex4_s1'
if not skipRuns:
ml = mf.Modflow(modelname, version='mf2005', exe_name=exe_name, model_ws=workspace)
discret = mf.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, laycbd=0,
delr=delr, delc=delc, top=botm[0], botm=botm[1:],
nper=nper, perlen=perlen, nstp=nstp, steady=steady)
bas = mf.ModflowBas(ml, ibound=ibound, strt=ihead)
lpf = mf.ModflowLpf(ml, laytyp=laytyp, hk=hk, vka=vka)
wel = mf.ModflowWel(ml, stress_period_data=base_well_data)
ghb = mf.ModflowGhb(ml, stress_period_data=ghb_data)
rch = mf.ModflowRch(ml, rech=rch_data)
swi = mf.ModflowSwi2(ml, nsrf=1, istrat=1, toeslope=toeslope, tipslope=tipslope, nu=nu,
zeta=z, ssz=ssz, isource=iso, nsolver=1,
nadptmx=nadptmx, nadptmn=nadptmn,
nobs=nobs, iswiobs=iswiobs, obsnam=obsnam, obslrc=obslrc)
oc = mf.ModflowOc88(ml, words=savewords)
pcg = mf.ModflowPcg(ml, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
# create model files
ml.write_input()
# run the model
m = ml.run_model(silent=False)
# model with saltwater wells
modelname2 = 'swiex4_s2'
if not skipRuns:
ml2 = mf.Modflow(modelname2, version='mf2005', exe_name=exe_name, model_ws=workspace)
discret = mf.ModflowDis(ml2, nlay=nlay, nrow=nrow, ncol=ncol, laycbd=0,
delr=delr, delc=delc, top=botm[0], botm=botm[1:],
nper=nper, perlen=perlen, nstp=nstp, steady=steady)
bas = mf.ModflowBas(ml2, ibound=ibound, strt=ihead)
lpf = mf.ModflowLpf(ml2, laytyp=laytyp, hk=hk, vka=vka)
wel = mf.ModflowWel(ml2, stress_period_data=swwells_well_data)
ghb = mf.ModflowGhb(ml2, stress_period_data=ghb_data)
rch = mf.ModflowRch(ml2, rech=rch_data)
swi = mf.ModflowSwi2(ml2, nsrf=1, istrat=1, toeslope=toeslope, tipslope=tipslope, nu=nu,
zeta=z, ssz=ssz, isource=iso, nsolver=1,
nadptmx=nadptmx, nadptmn=nadptmn,
nobs=nobs, iswiobs=iswiobs, obsnam=obsnam, obslrc=obslrc)
oc = mf.ModflowOc88(ml2, words=savewords)
pcg = mf.ModflowPcg(ml2, hclose=1.0e-6, rclose=3.0e-3, mxiter=100, iter1=50)
# create model files
ml2.write_input()
# run the model
m = ml2.run_model(silent=False)
# Load the simulation 1 `ZETA` data and `ZETA` observations.
# read base model zeta
zfile = fu.CellBudgetFile(os.path.join(ml.model_ws, modelname+'.zta'))
kstpkper = zfile.get_kstpkper()
zeta = []
for kk in kstpkper:
zeta.append(zfile.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta = np.array(zeta)
# read swi obs
zobs = np.genfromtxt(os.path.join(ml.model_ws, modelname+'.zobs'), names=True)
# Load the simulation 2 `ZETA` data and `ZETA` observations.
# read saltwater well model zeta
zfile2 = fu.CellBudgetFile(os.path.join(ml2.model_ws, modelname2+'.zta'))
kstpkper = zfile2.get_kstpkper()
zeta2 = []
for kk in kstpkper:
zeta2.append(zfile2.get_data(kstpkper=kk, text='ZETASRF 1')[0])
zeta2 = np.array(zeta2)
# read swi obs
zobs2 = np.genfromtxt(os.path.join(ml2.model_ws, modelname2+'.zobs'), names=True)
# Create arrays for the x-coordinates and the output years
x = np.linspace(-1500, 1500, 61)
xcell = np.linspace(-1500, 1500, 61) + delr / 2.
xedge = np.linspace(-1525, 1525, 62)
years = [40, 80, 120, 160, 200, 6, 12, 18, 24, 30]
# Define figure dimensions and colors used for plotting `ZETA` surfaces
# figure dimensions
fwid, fhgt = 8.00, 5.50
flft, frgt, fbot, ftop = 0.125, 0.95, 0.125, 0.925
# line color definition
icolor = 5
colormap = plt.cm.jet #winter
cc = []
cr = np.linspace(0.9, 0.0, icolor)
for idx in cr:
cc.append(colormap(idx))
# Recreate **Figure 9** from the SWI2 documentation (http://pubs.usgs.gov/tm/6a46/).
plt.rcParams.update({'legend.fontsize': 6, 'legend.frameon' : False})
fig = plt.figure(figsize=(fwid, fhgt), facecolor='w')
fig.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, bottom=fbot, top=ftop)
# first plot
ax = fig.add_subplot(2, 2, 1)
# axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in range(5):
# layer 1
ax.plot(xcell, zeta[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx], label='{:2d} years'.format(years[idx]))
# layer 2
ax.plot(xcell, zeta[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
# legend
plt.legend(loc='lower left')
# axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Recharge conditions', transform=ax.transAxes, va='center', ha='right', size='8')
# second plot
ax = fig.add_subplot(2, 2, 2)
# axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in range(5, len(years)):
# layer 1
ax.plot(xcell, zeta[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='{:2d} years'.format(years[idx]))
# layer 2
ax.plot(xcell, zeta[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
# legend
plt.legend(loc='lower left')
# axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Freshwater well withdrawal', transform=ax.transAxes, va='center', ha='right', size='8')
# third plot
ax = fig.add_subplot(2, 2, 3)
# axes limits
ax.set_xlim(-1500, 1500)
ax.set_ylim(-50, -10)
for idx in range(5, len(years)):
# layer 1
ax.plot(xcell, zeta2[idx, 0, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='{:2d} years'.format(years[idx]))
# layer 2
ax.plot(xcell, zeta2[idx, 1, 30, :], drawstyle='steps-mid',
linewidth=0.5, color=cc[idx-5], label='_None')
ax.plot([-1500, 1500], [-30, -30], color='k', linewidth=1.0)
# legend
plt.legend(loc='lower left')
# axes labels and text
ax.set_xlabel('Horizontal distance, in meters')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.975, .1, 'Freshwater and saltwater\nwell withdrawals', transform=ax.transAxes,
va='center', ha='right', size='8')
# fourth plot
ax = fig.add_subplot(2, 2, 4)
# axes limits
ax.set_xlim(0, 30)
ax.set_ylim(-50, -10)
t = zobs['TOTIM'][999:] / 365 - 200.
tz2 = zobs['layer1_001'][999:]
tz3 = zobs2['layer1_001'][999:]
for i in range(len(t)):
if zobs['layer2_001'][i+999] < -30. - 0.1:
tz2[i] = zobs['layer2_001'][i+999]
if zobs2['layer2_001'][i+999] < 20. - 0.1:
tz3[i] = zobs2['layer2_001'][i+999]
ax.plot(t, tz2, linestyle='solid', color='r', linewidth=0.75, label='Freshwater well')
ax.plot(t, tz3, linestyle='dotted', color='r', linewidth=0.75, label='Freshwater and saltwater well')
ax.plot([0, 30], [-30, -30], 'k', linewidth=1.0, label='_None')
# legend
leg = plt.legend(loc='lower right', numpoints=1)
# axes labels and text
ax.set_xlabel('Time, in years')
ax.set_ylabel('Elevation, in meters')
ax.text(0.025, .55, 'Layer 1', transform=ax.transAxes, va='center', ha='left', size='7')
ax.text(0.025, .45, 'Layer 2', transform=ax.transAxes, va='center', ha='left', size='7')
outfig = 'Figure09_swi2ex4.{0}'.format(fext)
fig.savefig(outfig, dpi=300)
print('created...', outfig)
| bsd-3-clause |
alexchao56/sklearn-theano | sklearn_theano/feature_extraction/overfeat.py | 7 | 22567 | # Authors: Michael Eickenberg
# Kyle Kastner
# License: BSD 3 Clause
import os
import theano
# Required to avoid fuse errors... very strange
theano.config.floatX = 'float32'
import zipfile
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from .overfeat_class_labels import get_overfeat_class_label
from .overfeat_class_labels import get_all_overfeat_labels
from .overfeat_class_labels import get_all_overfeat_leaves
from ..datasets import get_dataset_dir, download
from ..base import (Convolution, MaxPool, PassThrough,
Standardize, ZeroPad, Relu, fuse)
from ..utils import check_tensor
# better get it from a config file
NETWORK_WEIGHTS_PATH = get_dataset_dir("overfeat_weights")
SMALL_NETWORK_WEIGHT_FILE = 'net_weight_0'
SMALL_NETWORK_FILTER_SHAPES = np.array([(96, 3, 11, 11),
(256, 96, 5, 5),
(512, 256, 3, 3),
(1024, 512, 3, 3),
(1024, 1024, 3, 3),
(3072, 1024, 6, 6),
(4096, 3072, 1, 1),
(1000, 4096, 1, 1)])
SMALL_NETWORK_BIAS_SHAPES = SMALL_NETWORK_FILTER_SHAPES[:, 0]
SMALL_NETWORK = (SMALL_NETWORK_WEIGHT_FILE,
SMALL_NETWORK_FILTER_SHAPES,
SMALL_NETWORK_BIAS_SHAPES)
LARGE_NETWORK_WEIGHT_FILE = 'net_weight_1'
LARGE_NETWORK_FILTER_SHAPES = np.array([(96, 3, 7, 7),
(256, 96, 7, 7),
(512, 256, 3, 3),
(512, 512, 3, 3),
(1024, 512, 3, 3),
(1024, 1024, 3, 3),
(4096, 1024, 5, 5),
(4096, 4096, 1, 1),
(1000, 4096, 1, 1)])
LARGE_NETWORK_BIAS_SHAPES = LARGE_NETWORK_FILTER_SHAPES[:, 0]
LARGE_NETWORK = (LARGE_NETWORK_WEIGHT_FILE,
LARGE_NETWORK_FILTER_SHAPES,
LARGE_NETWORK_BIAS_SHAPES)
def fetch_overfeat_weights_and_biases(large_network=False, weights_file=None):
network = LARGE_NETWORK if large_network else SMALL_NETWORK
fname, weight_shapes, bias_shapes = network
if weights_file is None:
weights_file = os.path.join(NETWORK_WEIGHTS_PATH, fname)
if not os.path.exists(weights_file):
url = "https://dl.dropboxusercontent.com/u/15378192/net_weights.zip"
if not os.path.exists(NETWORK_WEIGHTS_PATH):
os.makedirs(NETWORK_WEIGHTS_PATH)
full_path = os.path.join(NETWORK_WEIGHTS_PATH, "net_weights.zip")
if not os.path.exists(full_path):
download(url, full_path, progress_update_percentage=1)
zip_obj = zipfile.ZipFile(full_path, 'r')
zip_obj.extractall(NETWORK_WEIGHTS_PATH)
zip_obj.close()
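    # The weight file is a flat float32 blob: for each layer the filter
    # weights are stored first, immediately followed by that layer's biases,
    # so it can be unpacked by walking a single offset through the memmap
    # using the shapes declared above.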
memmap = np.memmap(weights_file, dtype=np.float32)
mempointer = 0
weights = []
biases = []
for weight_shape, bias_shape in zip(weight_shapes, bias_shapes):
filter_size = np.prod(weight_shape)
weights.append(
memmap[mempointer:mempointer + filter_size].reshape(weight_shape))
mempointer += filter_size
biases.append(memmap[mempointer:mempointer + bias_shape])
mempointer += bias_shape
return weights, biases
def _get_architecture(large_network=False, weights_and_biases=None,
detailed=False):
if weights_and_biases is None:
weights_and_biases = fetch_overfeat_weights_and_biases(large_network)
weights, biases = weights_and_biases
# flip weights to make Xcorr
ws = [w[:, :, ::-1, ::-1] for w in weights]
bs = biases
if large_network and not detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(2, 2),
activation='relu'),
MaxPool((3, 3)),
Convolution(ws[1], bs[1], activation='relu'),
MaxPool((2, 2)),
Convolution(ws[2], bs[2],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[3], bs[3],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[4], bs[4],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[5], bs[5],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
MaxPool((3, 3)),
Convolution(ws[6], bs[6],
activation='relu'),
Convolution(ws[7], bs[7],
activation='relu'),
Convolution(ws[8], bs[8],
activation='identity')]
elif not large_network and not detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(4, 4),
activation='relu'),
MaxPool((2, 2)),
Convolution(ws[1], bs[1], activation='relu'),
MaxPool((2, 2)),
Convolution(ws[2], bs[2],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[3], bs[3],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
Convolution(ws[4], bs[4],
activation='relu',
cropping=[(1, -1), (1, -1)],
border_mode='full'),
MaxPool((2, 2)),
Convolution(ws[5], bs[5],
activation='relu'),
Convolution(ws[6], bs[6],
activation='relu'),
Convolution(ws[7], bs[7],
activation='identity')]
elif large_network and detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(2, 2),
activation=None),
Relu(),
MaxPool((3, 3)),
Convolution(ws[1], bs[1], activation=None),
Relu(),
MaxPool((2, 2)),
ZeroPad(1),
Convolution(ws[2], bs[2], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[3], bs[3], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[4], bs[4], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[5], bs[5], activation=None),
Relu(),
MaxPool((3, 3)),
Convolution(ws[6], bs[6], activation=None),
Relu(),
Convolution(ws[7], bs[7], activation=None),
Relu(),
Convolution(ws[8], bs[8], activation=None)
]
elif not large_network and detailed:
architecture = [
Standardize(118.380948, 61.896913),
Convolution(ws[0], bs[0], subsample=(4, 4), activation=None),
Relu(),
MaxPool((2, 2)),
Convolution(ws[1], bs[1], activation=None),
Relu(),
MaxPool((2, 2)),
ZeroPad(1),
Convolution(ws[2], bs[2], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[3], bs[3], activation=None),
Relu(),
ZeroPad(1),
Convolution(ws[4], bs[4], activation=None),
Relu(),
MaxPool((2, 2)),
Convolution(ws[5], bs[5], activation=None),
Relu(),
Convolution(ws[6], bs[6], activation=None),
Relu(),
Convolution(ws[7], bs[7], activation=None)
]
return architecture
def _get_fprop(large_network=False, output_layers=[-1], detailed=False):
arch = _get_architecture(large_network, detailed=detailed)
expressions, input_var = fuse(arch, output_expressions=output_layers,
input_dtype='float32')
fprop = theano.function([input_var], expressions)
return fprop
class OverfeatTransformer(BaseEstimator, TransformerMixin):
"""
A transformer/feature extractor for images using the OverFeat neural network.
Parameters
----------
large_network : boolean, optional (default=False)
Which network to use. If True, the transform will operate over X in
windows of 221x221 pixels. Otherwise, these windows will be 231x231.
output_layers : iterable, optional (default=[-1])
Which layers to return. Can be used to retrieve multiple levels of
output with a single call to transform.
force_reshape : boolean, optional (default=True)
Whether or not to force the output to be two dimensional. If true,
this class can be used as part of a scikit-learn pipeline.
force_reshape currently only supports len(output_layers) == 1!
    detailed_network : boolean, optional (default=False)
        If set to True, layers will be indexed and counted as in the binary
        version provided by the authors of OverFeat, i.e. convolution, relu,
        zero-padding, and max-pooling are all separate layers. If False,
        convolution and relu are treated as one unit and zero-padding layers
        are omitted.
batch_size : int, optional (default=None)
If set, input will be transformed in batches of size batch_size. This
can save memory at intermediate processing steps.
"""
def __init__(self, large_network=False, output_layers=[-1],
force_reshape=True,
transpose_order=(0, 3, 1, 2),
detailed_network=False,
batch_size=None):
self.large_network = large_network
self.output_layers = output_layers
self.force_reshape = force_reshape
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network,
output_layers,
detailed=detailed_network)
self.batch_size = batch_size
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def transform(self, X):
"""
Transform a set of images.
Returns the features from each layer.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, n_features]
If force_reshape = False,
list of array-like, length output_layers,
each shape = [n_images, n_windows,
n_window_features]
        Returns the features extracted for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
if self.batch_size is None:
if self.force_reshape:
return self.transform_function(X.transpose(
*self.transpose_order))[0].reshape((len(X), -1))
else:
return self.transform_function(
X.transpose(*self.transpose_order))
else:
XT = X.transpose(*self.transpose_order)
n_samples = XT.shape[0]
for i in range(0, n_samples, self.batch_size):
transformed_batch = self.transform_function(
XT[i:i + self.batch_size])
# at first iteration, initialize output arrays to correct size
if i == 0:
shapes = [(n_samples,) + t.shape[1:] for t in
transformed_batch]
ravelled_shapes = [np.prod(shp[1:]) for shp in shapes]
if self.force_reshape:
output_width = np.sum(ravelled_shapes)
output = np.empty((n_samples, output_width),
dtype=transformed_batch[0].dtype)
                        break_points = np.r_[0, np.cumsum(ravelled_shapes)]
raw_output = [
output[:, start:stop] for start, stop in
zip(break_points[:-1], break_points[1:])]
else:
                        output = [np.empty(shape, dtype=t.dtype)
                                  for shape, t in zip(shapes, transformed_batch)]
raw_output = [arr.reshape(n_samples, -1)
for arr in output]
for transformed, out in zip(transformed_batch, raw_output):
                    out[i:i + self.batch_size] = transformed
return output
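# Usage sketch (illustrative only; assumes `images` is a float32 array of
# shape [n_images, height, width, 3] and the OverFeat weights are available
# locally):
#
#     tf = OverfeatTransformer(output_layers=[-1], force_reshape=True)
#     features = tf.transform(images)
#     # features.shape == (n_images, n_features), usable in sklearn pipelines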
class OverfeatClassifier(BaseEstimator):
"""
A classifier for cropped images using the OverFeat neural network.
If large_network=True, this X will be cropped to the center
221x221 pixels. Otherwise, this cropped box will be 231x231.
Parameters
----------
large_network : boolean, optional (default=False)
Which network to use. If large_network = True, input will be cropped
to the center 221 x 221 pixels. Otherwise, input will be cropped to the
center 231 x 231 pixels.
top_n : integer, optional (default=5)
How many classes to return, based on sorted class probabilities.
output_strings : boolean, optional (default=True)
Whether to return class strings or integer classes. Returns class
strings by default.
Attributes
----------
crop_bounds_ : tuple, (x_left, x_right, y_lower, y_upper)
The coordinate boundaries of the cropping box used.
"""
def __init__(self, top_n=5, large_network=False, output_strings=True,
transpose_order=(0, 3, 1, 2)):
self.top_n = top_n
self.large_network = large_network
if self.large_network:
self.min_size = (221, 221)
else:
self.min_size = (231, 231)
self.output_strings = output_strings
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network, [-1])
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def _predict_proba(self, X):
x_midpoint = X.shape[2] // 2
y_midpoint = X.shape[1] // 2
x_lower_bound = x_midpoint - self.min_size[0] // 2
if x_lower_bound <= 0:
x_lower_bound = 0
x_upper_bound = x_lower_bound + self.min_size[0]
y_lower_bound = y_midpoint - self.min_size[1] // 2
if y_lower_bound <= 0:
y_lower_bound = 0
y_upper_bound = y_lower_bound + self.min_size[1]
self.crop_bounds_ = (x_lower_bound, x_upper_bound, y_lower_bound,
y_upper_bound)
res = self.transform_function(
X[:, y_lower_bound:y_upper_bound,
x_lower_bound:x_upper_bound, :].transpose(
*self.transpose_order))[0]
# Softmax activation
        exp_res = np.exp(res - res.max(axis=1, keepdims=True))
        exp_res /= np.sum(exp_res, axis=1, keepdims=True)
return exp_res
def predict(self, X):
"""
Classify a set of cropped input images.
Returns the top_n classes.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n classes for each of the n_images in X.
If output_strings is True, then the result will be string
description of the class label.
Otherwise, the returned values will be the integer class label.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
indices = np.argsort(res, axis=1)
indices = indices[:, -self.top_n:]
if self.output_strings:
class_strings = np.empty_like(indices,
dtype=object)
for index, value in enumerate(indices.flat):
class_strings.flat[index] = get_overfeat_class_label(value)
return class_strings
else:
return indices
def predict_proba(self, X):
"""
Prediction probability for a set of cropped input images.
Returns the top_n probabilities.
Parameters
----------
X : array-like, shape = [n_images, height, width, color]
or
shape = [height, width, color]
Returns
-------
T : array-like, shape = [n_images, top_n]
Returns the top_n probabilities for each of the n_images in X.
"""
X = check_tensor(X, dtype=np.float32, n_dim=4)
res = self._predict_proba(X)[:, :, 0, 0]
return np.sort(res, axis=1)[:, -self.top_n:]
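# Usage sketch (illustrative only; assumes `image` is a float32 RGB array of
# at least 231 x 231 pixels):
#
#     clf = OverfeatClassifier(top_n=3)
#     labels = clf.predict(image)        # top-3 ImageNet class strings
#     probs = clf.predict_proba(image)   # corresponding probabilities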
class OverfeatLocalizer(BaseEstimator):
"""
A localizer for single images using the OverFeat neural network.
    If large_network=True, class predictions are made over sliding windows
    of 221x221 pixels of X. Otherwise, these windows will be 231x231.
Parameters
----------
match_strings : iterable of strings
An iterable of class names to match with localizer. Can be a full
ImageNet class string or a WordNet leaf such as 'dog.n.01'. If the
pattern '.n.' is found in the match string, it will be treated as a
WordNet leaf, otherwise the string is assumed to be a class label.
large_network : boolean, optional (default=False)
Which network to use. If True, the transform will operate over X in
windows of 221x221 pixels. Otherwise, these windows will be 231x231.
top_n : integer, optional (default=5)
How many classes to return, based on sorted class probabilities.
"""
def __init__(self, match_strings, top_n=5, large_network=False,
transpose_order=(2, 0, 1)):
self.top_n = top_n
self.large_network = large_network
if self.large_network:
self.min_size = (221, 221)
else:
self.min_size = (231, 231)
self.match_strings = match_strings
self.transpose_order = transpose_order
self.transform_function = _get_fprop(self.large_network, [-1])
def fit(self, X, y=None):
"""Passthrough for scikit-learn pipeline compatibility."""
return self
def predict(self, X):
"""
Localize an input image.
Returns the points where the top_n classes contains any of the
match_strings.
Parameters
----------
X : array-like, shape = [height, width, color]
Returns
-------
T : list of array-likes, each of shape = [n_points, 2]
For each string in match_strings, points where that string was
in the top_n classes. len(T) will be equal to len(match_strings).
Each array in T is of size n_points x 2, where column 0 is
x point coordinate and column 1 is y point coordinate.
This means that an entry in T can be plotted with
plt.scatter(T[i][:, 0], T[i][:, 1])
"""
X = check_tensor(X, dtype=np.float32, n_dim=3)
res = self.transform_function(X.transpose(
*self.transpose_order)[None])[0]
# Softmax activation
        exp_res = np.exp(res - res.max(axis=1, keepdims=True))
        exp_res /= np.sum(exp_res, axis=1, keepdims=True)
indices = np.argsort(exp_res, axis=1)[:, -self.top_n:, :, :]
height = X.shape[0]
width = X.shape[1]
x_bound = width - self.min_size[0]
y_bound = height - self.min_size[1]
n_y = indices.shape[2]
n_x = indices.shape[3]
x_points = np.linspace(0, x_bound, n_x).astype('int32')
y_points = np.linspace(0, y_bound, n_y).astype('int32')
x_points = x_points + self.min_size[0] // 2
y_points = y_points + self.min_size[1] // 2
xx, yy = np.meshgrid(x_points, y_points)
per_window_labels = indices[0]
per_window_labels = per_window_labels.reshape(len(per_window_labels),
-1)
all_matches = []
overfeat_leaves = get_all_overfeat_leaves()
for match_string in self.match_strings:
if '.n.' in match_string:
# We were provided a wordnet category and must conglomerate
# points
all_match_labels = overfeat_leaves[match_string]
overfeat_labels = get_all_overfeat_labels()
match_indices = np.array(([overfeat_labels.index(s)
for s in all_match_labels]))
match_indices = np.unique(match_indices)
                matches = np.where(
                    np.in1d(per_window_labels, match_indices).reshape(
                        per_window_labels.shape))[1]
all_matches.append(np.vstack((xx.flat[matches],
yy.flat[matches])).T)
else:
                # Assume this is an OverFeat class
match_index = get_all_overfeat_labels().index(match_string)
matches = np.where(per_window_labels == match_index)[1]
all_matches.append(np.vstack((xx.flat[matches],
yy.flat[matches])).T)
return all_matches
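# Usage sketch (illustrative only; assumes `image` is a single float32 RGB
# array larger than the sliding window):
#
#     localizer = OverfeatLocalizer(match_strings=['dog.n.01'], top_n=5)
#     points = localizer.predict(image)
#     # points[0][:, 0] are x coordinates, points[0][:, 1] are y coordinates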
| bsd-3-clause |
salbrandi/patella | patella/click_commands.py | 1 | 3221 | # -*- coding: utf-8 -*-
"""
Controls command line operations
The only particularly relevant command now is: patella startup <path>
Not all commands retain functionality - this will be updated eventually (read: it might not be)
"""
# \/ Third-Party Packages \/
import os
import os.path
import click
import pandas as pd
# \/ Local Packages \/
from . import htmlparser as htmlparser
from . import patellaserver as flaskapp
class filec:
pass
file1 = filec()
file2 = filec()
file1.df = file2.df = pd.DataFrame({'foo': []})
file1.path = file2.path = ''
file1.name = file2.name = ''
@click.group()
def patella():
pass
@click.command()
@click.argument('url')
@click.option('--filename', default='datafile', help='specify the name of the local file that will be downloaded to the current directory')
@click.option('--filetype', default='.csv', help='specify the file type the scraper will look for')
def scrape_url(url, filetype, filename):
parseobj = htmlparser.find_download_links(url, filetype, filename, download=True)
    if parseobj is not None:
click.echo('ERROR: ' + parseobj['error']) # Error reporting
@click.command()
@click.argument('file_one')
@click.option('--delimiters', default=',:,', help='Specify file type delimiters in format <DELIM>:<DELIM2>')
def load_data(file_one, delimiters):
file1.path = os.getcwd() + '/' + file_one
if os.path.exists(file1.path):
file1.name = file_one
list_delims = delimiters.split(':')
if len(list_delims) == 2:
file1.df = pd.read_table(file1.path, list_delims[0], header=0)
file2.df = htmlparser.get_fe()
os.environ['LOCAL_FILE_PATH'] = file1.path
click.echo('file successfully loaded into Dataframes')
    else:
        click.echo('no files found with the name ' + file_one + ' in path ' + file1.path)
@click.command()
@click.argument('column')
@click.argument('filename')
def change_index(filename, column):
    if filename == file1.name:
file1.df.set_index(column)
else:
click.echo('no file found with that name')
@click.command()
@click.argument('column_names')
@click.argument('file')
def change_names(file, column_names):
pass
@click.command()
@click.argument('path')
def startserver(path):
flaskapp.startserver(path)
@click.command()
@click.argument('file')
@click.argument('col')
@click.option('--title', default=' ', help='specify the plot title')
@click.option('--x_title', default=' ', help='specify the X axis title')
@click.option('--y_title', default=' ', help='specify the Y axis title')
def plot(file, col, title, x_title, y_title):
file1.path = os.getcwd() + '/data/' + file
file1.df = pd.read_table(file1.path, ',', header=0)
htmlparser.compare(file1.df, htmlparser.get_fe(), col, title, x_title, y_title)
# A test cli command
@click.command()
@click.argument('foo')
def testme(foo):
pass
# add all the subcommands to the patella group
patella.add_command(scrape_url, name='scrape')
patella.add_command(testme, name='test')
patella.add_command(plot)
patella.add_command(load_data, name='load')
patella.add_command(startserver, name='startup')
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/traitlets/config/loader.py | 13 | 28772 | # encoding: utf-8
"""A simple configuration system."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import argparse
import copy
import logging
import os
import re
import sys
import json
from ast import literal_eval
from ipython_genutils.path import filefind
from ipython_genutils import py3compat
from ipython_genutils.encoding import DEFAULT_ENCODING
from six import text_type
from traitlets.traitlets import HasTraits, List, Any
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
        Currently, update is stored as a dict or set; extend and prepend as
        lists; and inserts as a list of (index, value) tuples.
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
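# Example (illustrative only): container methods called on a not-yet-defined
# config entry are recorded lazily and applied once the real initial value
# is known.
#
#     lzy = LazyConfigValue()
#     lzy.append(4)
#     lzy.extend([5, 6])
#     lzy.get_value([1, 2, 3])   # -> [1, 2, 3, 4, 5, 6]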
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in other.items():
if k not in self:
to_update[k] = v
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = v
self.update(to_update)
def collisions(self, other):
"""Check for collisions between two config objects.
        Returns a dict of the form {"Class": {"trait": "collision message"}},
indicating which values have been ignored.
An empty dict indicates no collisions.
"""
collisions = {}
for section in self:
if section not in other:
continue
mine = self[section]
theirs = other[section]
for key in mine:
if key in theirs and mine[key] != theirs[key]:
collisions.setdefault(section, {})
collisions[section][key] = "%r ignored, using %r" % (mine[key], theirs[key])
return collisions
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
new_config = type(self)()
for key, value in self.items():
if isinstance(value, (Config, LazyConfigValue)):
# deep copy config objects
value = copy.deepcopy(value, memo)
elif type(value) in {dict, list, set, tuple}:
# shallow copy plain container traits
value = copy.copy(value)
new_config[key] = value
return new_config
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
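# Example (illustrative only): section keys (capitalized names) auto-create
# nested Config objects, and merge() combines configs recursively.
#
#     c1 = Config()
#     c1.InteractiveShell.autocall = False
#     c2 = Config({'InteractiveShell': {'colors': 'NoColor'}})
#     c1.merge(c2)
#     # c1.InteractiveShell now contains both 'autocall' and 'colors'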
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
    (file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from traitlets.log import get_logger
return get_logger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
              By default, the logger of :meth:`traitlets.config.application.Application.instance()`
              will be used.
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A JSON file loader for config
Can also act as a context manager that rewrite the configuration file to disk on exit.
Example::
with JSONFileConfigLoader('myapp.json','/home/jupyter/configurations/') as c:
c.MyNewConfigurable.new_value = 'Updated'
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
def __enter__(self):
self.load_config()
return self.config
def __exit__(self, exc_type, exc_value, traceback):
"""
Exit the context manager but do not handle any errors.
In case of any error, we do not want to write the potentially broken
configuration to disk.
"""
self.config.version = 1
json_config = json.dumps(self.config, indent=2)
with open(self.full_filename, 'w') as f:
f.write(json_config)
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def load_subconfig(self, fname, path=None):
"""Injected into config file namespace as load_subconfig"""
if path is None:
path = self.path
loader = self.__class__(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there,
# treat it as an empty config file.
pass
else:
self.config.merge(sub_config)
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
def get_config():
"""Unnecessary now, but a deprecation warning is more trouble than it's worth."""
return self.config
namespace = dict(
c=self.config,
load_subconfig=self.load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with literal_eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = literal_eval(rhs)
except (NameError, SyntaxError, ValueError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in cfg.items():
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
This allows command line options to be gives in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
            A dict of flags, keyed by str name. Values can be Config objects,
            dicts, or "key=value" strings. If Config or dict, when the flag
            is triggered, the flag is loaded as `self.config.update(m)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from traitlets.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stdin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, text_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
            or dicts. When the flag is triggered, the config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warning("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
A tuple of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
args : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in vars(self.parsed_data).items():
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in aliases.items():
if key in flags:
# flags
nargs = '?'
else:
nargs = None
            if len(key) == 1:
paa('-'+key, '--'+key, type=text_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=text_type, dest=value, nargs=nargs)
for key, (value, help) in flags.items():
if key in self.aliases:
#
self.alias_flags[self.aliases[key]] = value
continue
            if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in vars(self.parsed_data).items():
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
| mit |
srli/SoftwareSystems | hw03/thinkplot.py | 88 | 12565 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
return cls.color_iter
def PrePlot(num=None, rows=1, cols=1):
"""Takes hints about what's coming.
num: number of lines that will be plotted
"""
if num:
Brewer.InitializeIter(num)
# TODO: get sharey and sharex working. probably means switching
# to subplots instead of subplot.
# also, get rid of the gray background.
if rows > 1 or cols > 1:
pyplot.subplots(rows, cols, sharey=True)
global SUBPLOT_ROWS, SUBPLOT_COLS
SUBPLOT_ROWS = rows
SUBPLOT_COLS = cols
def SubPlot(rows, cols, plot_number):
"""Configures the number of subplots and changes the current plot.
rows: int
cols: int
plot_number: int
"""
pyplot.subplot(rows, cols, plot_number)
class InfiniteList(list):
"""A list that returns the same value for all indices."""
def __init__(self, val):
"""Initializes the list.
val: value to be stored
"""
list.__init__(self)
self.val = val
def __getitem__(self, index):
"""Gets the item with the given index.
index: int
returns: the stored value
"""
return self.val
def Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.iteritems():
d.setdefault(key, val)
return d
def Clf():
"""Clears the figure and any hints that have been set."""
Brewer.ClearIter()
pyplot.clf()
def Figure(**options):
"""Sets options for the current figure."""
Underride(options, figsize=(6, 8))
pyplot.figure(**options)
def Plot(xs, ys, style='', **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
color_iter = Brewer.GetIter()
if color_iter:
try:
options = Underride(options, color=color_iter.next())
except StopIteration:
print 'Warning: Brewer ran out of colors.'
Brewer.ClearIter()
options = Underride(options, linewidth=3, alpha=0.8)
pyplot.plot(xs, ys, style, **options)
def Scatter(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
pyplot.scatter(xs, ys, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, fs = hist.Render()
width = min(Diff(xs))
if hist.name:
options = Underride(options, label=hist.name)
options = Underride(options,
align='center',
linewidth=0,
width=width)
pyplot.bar(xs, fs, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Diff(t):
"""Compute the differences between adjacent elements in a sequence.
Args:
        t: sequence of numbers
Returns:
sequence of differences (length one less than t)
"""
diffs = [t[i+1] - t[i] for i in range(len(t)-1)]
return diffs
def Cdf(cdf, complement=False, transform=None, **options):
"""Plots a CDF as a line.
Args:
cdf: Cdf object
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
Returns:
dictionary with the scale options that should be passed to
Config, Show or Save.
"""
xs, ps = cdf.Render()
scale = dict(xscale='linear', yscale='linear')
for s in ['xscale', 'yscale']:
if s in options:
scale[s] = options.pop(s)
if transform == 'exponential':
complement = True
scale['yscale'] = 'log'
if transform == 'pareto':
complement = True
scale['yscale'] = 'log'
scale['xscale'] = 'log'
if complement:
ps = [1.0-p for p in ps]
if transform == 'weibull':
xs.pop()
ps.pop()
ps = [-math.log(1.0-p) for p in ps]
scale['xscale'] = 'log'
scale['yscale'] = 'log'
if transform == 'gumbel':
xs.pop(0)
ps.pop(0)
ps = [-math.log(p) for p in ps]
scale['yscale'] = 'log'
if cdf.name:
options = Underride(options, label=cdf.name)
Plot(xs, ps, **options)
return scale
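# Example (illustrative only; assumes `my_cdf` is a Cdf object): the scale
# options returned by Cdf are meant to be passed on to Show or Save.
#
#     scale = Cdf(my_cdf, transform='pareto')   # log-log complementary CDF
#     Save(root='pareto_ccdf', **scale)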
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.iterkeys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
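# Editor's sketch (not part of the original module): Contour expects a mapping
# from (x, y) pairs to z values (or an object with GetDict); the grid is
# rebuilt with meshgrid and missing pairs default to z = 0.  A minimal,
# hypothetical call using the pseudocolor path:
def _demo_contour():
    d = dict(((x, y), x ** 2 + y ** 2) for x in range(5) for y in range(5))
    Contour(d, pcolor=True, contour=False)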
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
    xs: sequence of x values
    ys: sequence of y values
    zs: 2-D array (or nested sequence) of z values
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
loc = options.get('loc', 0)
legend = options.get('legend', True)
if legend:
pyplot.legend(loc=loc)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
# TODO: figure out how to show more than one plot
Config(**options)
pyplot.show()
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
if root:
for fmt in formats:
SaveFormat(root, fmt)
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print 'Writing', filename
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
color_iter = Brewer.ColorGenerator(7)
for color in color_iter:
print color
if __name__ == '__main__':
main()
| gpl-3.0 |
gertingold/scipy | scipy/cluster/tests/test_hierarchy.py | 11 | 41545 | #
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders, complete, weighted, centroid,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except Exception:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_non_finite_elements_in_distance_matrix(self):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = np.zeros((6,))
y[0] = np.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
self.check_linkage_tdist(method)
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
self.check_linkage_q(method)
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(y, method)
assert_allclose(Z, expectedZ, atol=1e-06)
def test_compare_with_trivial(self):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(d, method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
def test_optimal_leaf_ordering(self):
Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
assert_allclose(Z, expectedZ, atol=1e-10)
class TestLinkageTies(object):
_expectations = {
'single': np.array([[0, 1, 1.41421356, 2],
[2, 3, 1.41421356, 3]]),
'complete': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.82842712, 3]]),
'average': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'weighted': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'centroid': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'median': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'ward': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.44948974, 3]]),
}
def test_linkage_ties(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
self.check_linkage_ties(method)
def check_linkage_ties(self, method):
X = np.array([[-1, -1], [0, 0], [1, 1]])
Z = linkage(X, method=method)
expectedZ = self._expectations[method]
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
self.check_inconsistent_tdist(depth)
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fclusterdata(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fclusterdata(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fclusterdata(t, 'maxclust')
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fcluster(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster(t, 'maxclust')
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster_monocrit(t)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster_maxclust_monocrit(t)
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc)
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc, True, 5)
def test_is_isomorphic_7(self):
# Regression test for gh-6271
assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_linkage_various_size(nrow, ncol, valid)
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_im_various_size(nrow, ncol, valid)
def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with inconsistency matrices of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(object):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
self.check_leaves_list_Q(method)
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(object):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(object):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxdists_Q_linkage(method)
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxinconsts_Q_linkage(method)
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
self.check_maxRstat_invalid_index(i)
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
self.check_maxRstat_empty_linkage(i)
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
self.check_maxRstat_difrow_linkage(i)
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
self.check_maxRstat_one_cluster_linkage(i)
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
self.check_maxRstat_Q_linkage(method, i)
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
self.check_dendrogram_plot(orientation)
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(221)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
assert_equal(R1, expected)
# test that dendrogram accepts and handle the leaf_font_size and
# leaf_rotation keywords
R1a = dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20, leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
assert_equal(testlabel.get_size(), 20)
R1a = dendrogram(Z, ax=ax, orientation=orientation,
leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
R1a = dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_size(), 20)
plt.close()
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
# reset color palette (global list)
set_link_color_palette(None)
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
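# Editor's note (not an original scipy test): calculate_maximum_distances is
# the brute-force reference that the TestMaxDists cases compare maxdists
# against.  A standalone sanity check, using the imports at the top of this
# file, would look like this:
def _example_maxdists_reference(n=6):
    y = np.random.rand(n * (n - 1) // 2)   # condensed distance matrix
    Z = linkage(y)
    assert_allclose(maxdists(Z), calculate_maximum_distances(Z), atol=1e-15)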
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
def test_node_compare():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
tree = to_tree(Z)
assert_(tree > tree.get_left())
assert_(tree.get_right() > tree.get_left())
assert_(tree.get_right() == tree.get_right())
assert_(tree.get_right() != tree.get_left())
def test_cut_tree():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
cutree = cut_tree(Z)
assert_equal(cutree[:, 0], np.arange(nobs))
assert_equal(cutree[:, -1], np.zeros(nobs))
assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
nodes = _order_cluster_tree(Z)
heights = np.array([node.dist for node in nodes])
assert_equal(cutree[:, np.searchsorted(heights, [5])],
cut_tree(Z, height=5))
assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
cut_tree(Z, height=[5, 10]))
assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
cut_tree(Z, height=[10, 5]))
def test_optimal_leaf_ordering():
# test with the distance vector y
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
hierarchy_test_data.ytdist)
expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
assert_allclose(Z, expectedZ, atol=1e-10)
# test with the observation matrix X
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
hierarchy_test_data.X)
expectedZ = hierarchy_test_data.linkage_X_ward_olo
assert_allclose(Z, expectedZ, atol=1e-06)
def test_Heap():
values = np.array([2, -1, 0, -1.5, 3])
heap = Heap(values)
pair = heap.get_min()
assert_equal(pair['key'], 3)
assert_equal(pair['value'], -1.5)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], -1)
heap.change_value(1, 2.5)
pair = heap.get_min()
assert_equal(pair['key'], 2)
assert_equal(pair['value'], 0)
heap.remove_min()
heap.remove_min()
heap.change_value(1, 10)
pair = heap.get_min()
assert_equal(pair['key'], 4)
assert_equal(pair['value'], 3)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], 10)
| bsd-3-clause |
cpmech/CIVL4250py | genetic-algo/ga_auxiliary.py | 1 | 5801 | ###########################################################################################
########### computing #####################################################################
###########################################################################################
from numpy import linspace, sin, pi, array, zeros, hstack, cumsum
from numpy import vectorize, ones
from numpy.random import seed, random, randint
# Random generates n numbers between xa and xb
def Random(n, xa, xb): return random(n) * (xb - xa) + xa
# FlipCoin generates a Bernoulli variable: throws a coin that is True with probability p
def FlipCoin(p):
if p==1.0: return True
if p==0.0: return False
if random()<=p: return True
return False
# SimpleChromo splits x into n unequal parts
def SimpleChromo(x, n):
vals = random(n)
sumv = sum(vals)
return x * vals / sumv
# Fitness function: map objective function into [0, 1]
# Y -- objective values
def Fitness(Y):
ymin, ymax = min(Y), max(Y)
if abs(ymax - ymin) < 1e-14: return ones(len(Y))
return (ymax - Y) / (ymax - ymin)
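# Editor's example (not in the original module): Fitness maps the smallest
# objective value to 1.0 and the largest to 0.0, which is what a minimizing
# GA needs.  Quick check using numpy's array imported above:
def _fitness_example():
    F = Fitness(array([1.0, 3.0, 2.0]))
    # F == array([1.0, 0.0, 0.5])
    return F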
# SortPop sorts individuals by fitness (decreasing order)
# C -- chromosomes/population
# Y -- objective values
# F -- fitness
def SortPop(C, Y, F):
I = F.argsort()[::-1] # the [::-1] is a trick to reverse the sorting order
C = C[I] # sorted chromosomes
Y = Y[I] # sorted objective values
F = F[I] # sorted fitness
return C, Y, F
# PrintPop prints all individuals
# C -- chromosomes/population
# Y -- objective values
# xFcn -- converts C to X values
# F -- fitness
# P -- probabilities
# M -- cumulated probabilities
def PrintPop(C, Y, xFcn, F=None, P=None, M=None, showC=False):
print '%7s%7s' % ('x', 'y'),
X = array([xFcn(c) for c in C])
if showC:
L = []
for c in C:
l = ''
for v in c: l += '%7.3f' % v
L.append(l)
nc = str(len(C[0]) * 7)
print ('%'+nc+'s') % ('chromosome/bases'),
if F!=None: print '%8s' % 'fitness',
if P!=None: print '%8s' % 'prob',
if M!=None: print '%8s' % 'cum.prob',
print
for i, x in enumerate(X):
print '%7.2f%7.2f' % (x, Y[i]),
if showC: print L[i],
if F!=None: print '%8.3f' % F[i],
if P!=None: print '%8.3f' % P[i],
if M!=None: print '%8.3f' % M[i],
print
# RouletteSelect selects n individuals
# M -- cumulated probabilities
def RouletteSelect(M, n, sample=None):
if sample==None: sample = random(n)
S = zeros(n, dtype=int) # selected individuals
for i, s in enumerate(sample):
for j, m in enumerate(M):
if m > s:
S[i] = j
break
return S
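# Editor's example (not in the original module): with cumulated probabilities
# M = [0.5, 0.8, 1.0], a sample value of 0.6 falls in the second bin, so
# individual 1 is selected.  Deterministic sketch using cumsum imported above:
def _roulette_example():
    M = cumsum([0.5, 0.3, 0.2])                        # -> [0.5, 0.8, 1.0]
    S = RouletteSelect(M, 3, sample=[0.1, 0.6, 0.95])
    # S == array([0, 1, 2])
    return S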
# FilterPairs generates two lists of ninds/2 indices each from the selected individuals,
# trying to avoid repeated indices within any pair
def FilterPairs(S):
ninds = len(S)
A = zeros(ninds/2, dtype=int)
B = zeros(ninds/2, dtype=int)
for i in range(ninds/2):
a, b = S[2*i], S[2*i+1]
if a == b:
for s in S:
if s != a:
b = s
break
A[i], B[i] = a, b
return A, B
###########################################################################################
########### graphing ######################################################################
###########################################################################################
from matplotlib.patches import Rectangle, FancyArrowPatch
from pylab import grid, xlabel, ylabel, legend, plot, show, close, subplot
from pylab import clf, gca, xticks, text, axis, savefig, rcParams
# Gll adds grid, labels and legend
def Gll(xl, yl, legpos=None): grid(); xlabel(xl); ylabel(yl); legend(loc=legpos)
# PlotProbBins plots probabilities bins
# X -- population
# P -- probabilities
def PlotProbBins(X, P):
rcParams.update({'figure.figsize':[800/72.27,200/72.27]})
x0, Tk = 0.0, [0.0]
for i in range(len(X)):
gca().add_patch(Rectangle([x0, 0], P[i], 0.2, color='#d5e7ed', ec='black', clip_on=0))
ha = 'center'
if i==len(X)-1: ha = 'left' # last one
text(x0+P[i]/2.0, 0.1, '%.1f'%X[i], ha=ha)
x0 += P[i]
Tk.append(x0)
xticks(Tk, ['%.2f'%v for v in Tk])
axis('equal')
gca().get_yaxis().set_visible(False)
for dir in ['left', 'right', 'top']:
gca().spines[dir].set_visible(False)
xlabel('cumulated probability')
grid()
axis([0, 1, 0, 0.2])
# DrawChromo draws one chromosome
def DrawChromo(key, A, pos, y0, swap_colors, red='#e3a9a9', blue='#c8d0e3'):
nbases = len(A)
x0, l = 0.1, 1.0 / float(nbases)
red, blue = red, blue
text(x0-0.01, y0+0.05, key, ha='right')
if swap_colors: red, blue = blue, red
for i in range(0, pos):
gca().add_patch(Rectangle([x0, y0], l, 0.1, color=red, ec='black'))
text(x0+l/2.0, y0+0.05, '%.3f'%A[i], ha='center')
x0 += l
for i in range(pos, nbases):
gca().add_patch(Rectangle([x0, y0], l, 0.1, color=blue, ec='black'))
text(x0+l/2.0, y0+0.05, '%.3f'%A[i], ha='center')
x0 += l
# DrawCrossover draws crossover process
def DrawCrossover(A, B, a, b, pos):
rcParams.update({'figure.figsize':[800/72.27,400/72.27]})
DrawChromo('A', A, pos, 0.35, 0)
DrawChromo('B', B, pos, 0.25, 1)
DrawChromo('a', a, pos, 0.10, 0, blue='#e3a9a9')
DrawChromo('b', b, pos, 0.00, 0, red='#c8d0e3')
axis('equal')
axis([0, 1.2, 0, 0.4])
gca().get_yaxis().set_visible(False)
gca().get_xaxis().set_visible(False)
for dir in ['left', 'right', 'top', 'bottom']:
gca().spines[dir].set_visible(False)
gca().add_patch(FancyArrowPatch([0.6,0.25], [0.6, 0.2], fc='#9fffde', mutation_scale=30))
| mit |
dquartul/BLonD | unittests/llrf/test_rf_modulation.py | 2 | 8157 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 15 18:57:49 2017
@author: schwarz
"""
#General imports
import unittest
import numpy as np
import matplotlib.pyplot as plt
#BLonD imports
import blond.llrf.rf_modulation as rfMod
import blond.utils.exceptions as blExcept
class TestRFModulation(unittest.TestCase):
def test_construct(self):
timebase = np.linspace(0, 1, 100)
stringMsg = "Integer input should raise an InputDataError exception"
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(1, 1, 1, 1, 1)
stringMsg = "String input should raise an InputDataError exception"
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation('a', 1, 1, 1, 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(timebase, 'a', 1, 1, 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(timebase, 1, 'a', 1, 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(timebase, 1, 1, 'a', 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(timebase, 1, 1, 1, 'a')
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(timebase, 1, 1, 1, 1, 'a')
stringMsg = "Wrong shape input should raise an InputDataError exception"
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(np.zeros([2, 2]), 1, 1, 1, 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(np.zeros(2), [1, 2, 3], 1, 1, 1)
with self.assertRaises(blExcept.InputDataError, msg=stringMsg):
rfMod.PhaseModulation(np.zeros(2), 1, np.zeros([3, 100]), 1, 1, 1)
modulator = rfMod.PhaseModulation(timebase, 1, 1, 1, 1)
self.assertEqual((modulator.timebase, modulator.frequency, \
modulator.amplitude, modulator.offset, \
modulator.multiplier, modulator.harmonic), \
(timebase, 1, 1, 1, 1, 1), \
msg = "Input has not been applied correctly")
with self.assertRaises(TypeError, \
msg = 'Non-boolean input should raise TypeError'):
rfMod.PhaseModulation(np.zeros(2), 1, 1, 1, 1, 1, "Not a bool")
def test_interpolation(self):
timebase = np.linspace(0, 1, 1000)
modulator = rfMod.PhaseModulation(timebase, 1, 1, 1, 1)
self.assertTrue(all(modulator._interp_param([[0, 1], [0, 1]]) \
== timebase), msg = 'Function interpolation incorrect')
self.assertTrue(all(modulator._interp_param(1) == 1), \
msg = 'Single valued interpolation incorrect')
with self.assertRaises(TypeError, \
msg='Error should be raised for wrong shape data'):
modulator._interp_param([1, 0])
def test_modulation(self):
timebase = np.linspace(0, 1, 1000)
testFreqProg = [[0, 1], [20, 5]]
testAmpProg = [[0, 0.5, 1], [0, 2, 0]]
testOffsetProg = [[0, 1], [0, np.pi]]
testMultProg = 2
harmonic = 8
modulator = rfMod.PhaseModulation(timebase, testFreqProg, \
testAmpProg, testOffsetProg, \
harmonic, testMultProg)
modulator.calc_modulation()
self.assertEqual(modulator.dphi[0], 0, \
msg = 'Start phase should be 0')
self.assertEqual(modulator.dphi[-1], np.pi, \
msg = 'Start phase should be np.pi')
self.assertAlmostEqual(np.max(modulator.dphi), 3.55908288285, 5,
msg = 'Max dphi is incorrect')
def test_delta_omega(self):
timebase = np.linspace(0, 1, 1000)
freqProg = np.array([np.linspace(0, 1, 10000), \
np.linspace(1E6, 1E6, 10000)])
modulator = rfMod.PhaseModulation(timebase, 1, 1, 1, 1)
modulator.dphi = np.linspace(0, np.pi/2, 250).tolist() \
+ [np.pi/2]*500 \
+ np.linspace(np.pi/2, 0, 250).tolist()
with self.assertRaises(blExcept.InputDataError, \
msg = 'wrong shape frequency should raise Error'):
modulator.calc_delta_omega(np.zeros([3, 100]))
modulator.calc_delta_omega(freqProg)
self.assertEqual(np.sum(modulator.domega), 0, \
msg = "Trapezoid dphi should give sum(domega) == 0")
modulator.dphi = [np.pi/2]*1000
modulator.calc_delta_omega(freqProg)
self.assertEqual(modulator.domega.tolist(), [0]*len(timebase), \
msg = "Constant dphi should have domega == 0")
def test_extender(self):
timebase = np.linspace(0, 1, 1000)
testFreqProg = [[0, 1], [1E3, 5E2]]
testAmpProg = [[0, 0.5, 1], [0, 1, 0]]
testOffsetProg = [[0, 1], [0, np.pi]]
testMultProg = 2
harmonic = 8
freqProg = np.array([np.linspace(0, 1, 10000), \
np.linspace(1E6, 2E6, 10000)])
modulator = rfMod.PhaseModulation(timebase, testFreqProg, \
testAmpProg, testOffsetProg, \
harmonic, testMultProg, \
modulate_frequency = False)
modulator.calc_modulation()
with self.assertRaises(AttributeError, \
msg = """Attribute error should be raised
before domega has been calculated"""):
modulator.extend_to_n_rf(8)
modulator.calc_delta_omega(freqProg)
with self.assertRaises(AttributeError, \
msg = """AttrubuteError should be raised if
modulator.harmonic not in passed harmonics"""):
dPhi, dOmega = modulator.extend_to_n_rf([1, 3, 5])
dPhi, dOmega = modulator.extend_to_n_rf([1, 3, 5, 7, 8])
self.assertEqual(len(dPhi), 5, \
msg = "dPhi Not correctly extended to n_rf")
self.assertEqual(len(dPhi), 5, \
msg = "dOmega not correctly extended to n_rf")
for i in range(5):
self.assertEqual(len(dPhi[i]), 2, \
msg = "All dPhi members should have length 2")
self.assertEqual(len(dOmega[i]), 2, \
msg = "All dOmega members should have length 2")
if i != 4:
self.assertEqual(dPhi[i][1], [0, 0], \
msg = "Unused system dPhi should be [0, 0]")
self.assertEqual(dOmega[i][1], [0, 0], \
msg = "Unused system dOmega should be [0, 0]")
else:
self.assertEqual(dPhi[i][1].tolist(), \
modulator.dphi.tolist(), \
msg = "Used dPhi should match dPhi")
self.assertEqual(dOmega[i][1].tolist(), \
[0]*len(timebase), \
msg = """Used dOmega should be 0 with
modulate_frequency = False""")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
nistats/nistats | nistats/first_level_model.py | 1 | 42594 | """
This module presents an interface to use the glm implemented in
nistats.regression.
It contains the GLM and contrast classes that are meant to be the main objects
of fMRI data analyses.
Author: Bertrand Thirion, Martin Perez-Guevara, 2016
"""
import glob
import json
import os
import sys
import time
from warnings import warn
import numpy as np
import pandas as pd
from nibabel import Nifti1Image
from nibabel.onetime import setattr_on_read
from sklearn.base import (BaseEstimator,
clone,
TransformerMixin,
)
from sklearn.externals.joblib import Memory
from nilearn.input_data import NiftiMasker
from nilearn._utils import CacheMixin
from nilearn._utils.niimg_conversions import check_niimg
from sklearn.externals.joblib import (Parallel,
delayed,
)
from .contrasts import (_compute_fixed_effect_contrast,
expression_to_contrast_vector)
from .design_matrix import make_first_level_design_matrix
from .regression import (ARModel,
OLSModel,
SimpleRegressionResults,
RegressionResults
)
from .utils import (_basestring,
_check_run_tables,
_check_events_file_uses_tab_separators,
get_bids_files,
parse_bids_filename,
get_data
)
from nistats._utils.helpers import replace_parameters
def mean_scaling(Y, axis=0):
"""Scaling of the data to have percent of baseline change along the
specified axis
Parameters
----------
Y : array of shape (n_time_points, n_voxels)
The input data.
Returns
-------
Y : array of shape (n_time_points, n_voxels),
The data after mean-scaling, de-meaning and multiplication by 100.
mean : array of shape (n_voxels,)
The data mean.
"""
mean = Y.mean(axis=axis)
if (mean == 0).any():
        warn('Mean values of 0 observed. '
             'The data have probably been centered. '
             'Scaling might not work as expected.')
mean = np.maximum(mean, 1)
Y = 100 * (Y / mean - 1)
return Y, mean
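def _mean_scaling_example():
    """Editor's sketch, not part of the nistats API: percent signal change.

    With Y = [[90, 200], [110, 200]] the per-voxel means are [100, 200], so
    the scaled data are [[-10, 0], [10, 0]] (percent change around each mean).
    """
    Y = np.array([[90., 200.], [110., 200.]])
    Y_scaled, mean = mean_scaling(Y)
    return Y_scaled, mean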
def _ar_model_fit(X, val, Y):
"""Wrapper for fit method of ARModel to allow joblib parallelization"""
return ARModel(X, val).fit(Y)
def run_glm(Y, X, noise_model='ar1', bins=100, n_jobs=1, verbose=0):
""" GLM fit for an fMRI data matrix
Parameters
----------
Y : array of shape (n_time_points, n_voxels)
The fMRI data.
X : array of shape (n_time_points, n_regressors)
The design matrix.
noise_model : {'ar1', 'ols'}, optional
The temporal variance model. Defaults to 'ar1'.
bins : int, optional
Maximum number of discrete bins for the AR(1) coef histogram.
n_jobs : int, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : int, optional
        The verbosity level. Default is 0.
Returns
-------
labels : array of shape (n_voxels,),
A map of values on voxels used to identify the corresponding model.
results : dict,
Keys correspond to the different labels values
values are RegressionResults instances corresponding to the voxels.
"""
acceptable_noise_models = ['ar1', 'ols']
if noise_model not in acceptable_noise_models:
raise ValueError(
"Acceptable noise models are {0}. You provided "
"'noise_model={1}'".format(acceptable_noise_models,
noise_model)
)
if Y.shape[0] != X.shape[0]:
raise ValueError('The number of rows of Y '
'should match the number of rows of X.'
' You provided X with shape {0} '
'and Y with shape {1}'.
format(X.shape, Y.shape))
# Create the model
ols_result = OLSModel(X).fit(Y)
if noise_model == 'ar1':
# compute and discretize the AR1 coefs
ar1 = (
(ols_result.residuals[1:]
* ols_result.residuals[:-1]).sum(axis=0)
/ (ols_result.residuals ** 2).sum(axis=0)
)
del ols_result
ar1 = (ar1 * bins).astype(np.int) * 1. / bins
        # Fit the AR model according to current AR(1) estimates
results = {}
labels = ar1
# Parallelize by creating a job per ARModel
vals = np.unique(ar1)
ar_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_ar_model_fit)(X, val, Y[:, labels == val])
for val in vals)
for val, result in zip(vals, ar_result):
results[val] = result
del vals
del ar_result
else:
labels = np.zeros(Y.shape[1])
results = {0.0: ols_result}
return labels, results
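def _run_glm_example():
    """Editor's sketch, not part of the nistats API: run_glm on synthetic data.

    Builds a design with one regressor plus an intercept, simulates
    Y = X.dot(beta) + noise for 20 "voxels", and reads the estimated effects
    back from the per-label regression results.  With noise_model='ols' all
    voxels share the single label 0.0.
    """
    rng = np.random.RandomState(42)
    n_scans, n_voxels = 50, 20
    X = np.column_stack([rng.randn(n_scans), np.ones(n_scans)])
    beta = np.vstack([np.full(n_voxels, 2.0), np.full(n_voxels, 10.0)])
    Y = X.dot(beta) + rng.randn(n_scans, n_voxels)
    labels, results = run_glm(Y, X, noise_model='ols')
    theta = results[0.0].theta   # (n_regressors, n_voxels) parameter estimates
    return labels, theta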
class FirstLevelModel(BaseEstimator, TransformerMixin, CacheMixin):
""" Implementation of the General Linear Model
for single session fMRI data.
Parameters
----------
t_r : float
This parameter indicates repetition times of the experimental runs.
In seconds. It is necessary to correctly consider times in the design
matrix. This parameter is also passed to nilearn.signal.clean.
Please see the related documentation for details.
slice_time_ref : float, optional (default 0.)
This parameter indicates the time of the reference slice used in the
slice timing preprocessing step of the experimental runs. It is
expressed as a percentage of the t_r (time repetition), so it can have
values between 0. and 1.
hrf_model : {'spm', 'spm + derivative', 'spm + derivative + dispersion',
'glover', 'glover + derivative', 'glover + derivative + dispersion',
'fir', None}
String that specifies the hemodynamic response function.
Defaults to 'glover'.
drift_model : string, optional
This parameter specifies the desired drift model for the design
matrices. It can be 'polynomial', 'cosine' or None.
high_pass : float, optional
This parameter specifies the cut frequency of the high-pass filter in
Hz for the design matrices. Used only if drift_model is 'cosine'.
drift_order : int, optional
        This parameter specifies the order of the drift model (in case it is
polynomial) for the design matrices.
fir_delays : array of shape(n_onsets) or list, optional
In case of FIR design, yields the array of delays used in the FIR
model, in scans.
min_onset : float, optional
This parameter specifies the minimal onset relative to the design
(in seconds). Events that start before (slice_time_ref * t_r +
min_onset) are not considered.
mask_img : Niimg-like, NiftiMasker object or False, optional
Mask to be used on data. If an instance of masker is passed,
then its mask will be used. If no mask is given,
it will be computed automatically by a NiftiMasker with default
parameters. If False is given then the data will not be masked.
target_affine : 3x3 or 4x4 matrix, optional
This parameter is passed to nilearn.image.resample_img.
Please see the related documentation for details.
target_shape : 3-tuple of integers, optional
This parameter is passed to nilearn.image.resample_img.
Please see the related documentation for details.
smoothing_fwhm : float, optional
If smoothing_fwhm is not None, it gives the size in millimeters of
the spatial smoothing to apply to the signal.
memory : string, optional
Path to the directory used to cache the masking process and the glm
fit. By default, no caching is done.
Creates instance of joblib.Memory.
memory_level : integer, optional
Rough estimator of the amount of memory used by caching. Higher value
means more memory for caching.
standardize : boolean, optional
If standardize is True, the time-series are centered and normed:
their variance is put to 1 in the time dimension.
signal_scaling : False, int or (int, int), optional,
If not False, fMRI signals are
scaled to the mean value of scaling_axis given,
which can be 0, 1 or (0, 1).
0 refers to mean scaling each voxel with respect to time,
1 refers to mean scaling each time point with respect to all voxels &
(0, 1) refers to scaling with respect to voxels and time,
which is known as grand mean scaling.
Incompatible with standardize (standardize=False is enforced when
signal_scaling is not False).
noise_model : {'ar1', 'ols'}, optional
The temporal variance model. Defaults to 'ar1'
verbose : integer, optional
Indicate the level of verbosity. By default, nothing is printed.
If 0 prints nothing. If 1 prints progress by computation of
each run. If 2 prints timing details of masker and GLM. If 3
prints masker computation details.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs', -2 'all CPUs but one', and so on.
minimize_memory : boolean, optional
Gets rid of some variables on the model fit results that are not
necessary for contrast computation and would only be useful for
further inspection of model details. This has an important impact
on memory consumption. True by default.
subject_label : string, optional
This id will be used to identify a `FirstLevelModel` when passed to
a `SecondLevelModel` object.
Attributes
----------
labels_ : array of shape (n_voxels,),
a map of values on voxels used to identify the corresponding model
results_ : dict,
with keys corresponding to the different labels values.
Values are SimpleRegressionResults corresponding to the voxels,
if minimize_memory is True,
RegressionResults if minimize_memory is False
"""
@replace_parameters({'mask': 'mask_img'}, end_version='next')
def __init__(self, t_r=None, slice_time_ref=0., hrf_model='glover',
drift_model='cosine', high_pass=.01, drift_order=1,
fir_delays=[0], min_onset=-24, mask_img=None,
target_affine=None, target_shape=None, smoothing_fwhm=None,
memory=Memory(None), memory_level=1, standardize=False,
signal_scaling=0, noise_model='ar1', verbose=0, n_jobs=1,
minimize_memory=True, subject_label=None):
# design matrix parameters
self.t_r = t_r
self.slice_time_ref = slice_time_ref
self.hrf_model = hrf_model
self.drift_model = drift_model
self.high_pass = high_pass
self.drift_order = drift_order
self.fir_delays = fir_delays
self.min_onset = min_onset
# glm parameters
self.mask_img = mask_img
self.target_affine = target_affine
self.target_shape = target_shape
self.smoothing_fwhm = smoothing_fwhm
if isinstance(memory, _basestring):
self.memory = Memory(memory)
else:
self.memory = memory
self.memory_level = memory_level
self.standardize = standardize
if signal_scaling is False:
self.signal_scaling = signal_scaling
elif signal_scaling in [0, 1, (0, 1)]:
self.scaling_axis = signal_scaling
self.signal_scaling = True
self.standardize = False
else:
raise ValueError('signal_scaling must be "False", "0", "1"'
' or "(0, 1)"')
self.noise_model = noise_model
self.verbose = verbose
self.n_jobs = n_jobs
self.minimize_memory = minimize_memory
# attributes
self.labels_ = None
self.results_ = None
self.subject_label = subject_label
def fit(self, run_imgs, events=None, confounds=None,
design_matrices=None):
""" Fit the GLM
For each run:
1. create design matrix X
2. do a masker job: fMRI_data -> Y
3. fit regression to (Y, X)
Parameters
----------
run_imgs: Niimg-like object or list of Niimg-like objects,
See http://nilearn.github.io/manipulating_images/input_output.html#inputing-data-file-names-or-image-objects # noqa:E501
Data on which the GLM will be fitted. If this is a list,
the affine is considered the same for all.
events: pandas Dataframe or string or list of pandas DataFrames or
strings
fMRI events used to build design matrices. One events object
            expected per run_img. Ignored in case design_matrices is not None.
If string, then a path to a csv file is expected.
confounds: pandas Dataframe or string or list of pandas DataFrames or
strings
Each column in a DataFrame corresponds to a confound variable
to be included in the regression model of the respective run_img.
The number of rows must match the number of volumes in the
            respective run_img. Ignored in case design_matrices is not None.
If string, then a path to a csv file is expected.
design_matrices: pandas DataFrame or list of pandas DataFrames,
Design matrices that will be used to fit the GLM. If given it
takes precedence over events and confounds.
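        Examples
        --------
        A hypothetical multi-run call (paths are placeholders); passing
        precomputed design_matrices instead would bypass events and confounds::
            model.fit(['run-1_bold.nii.gz', 'run-2_bold.nii.gz'],
                      events=['run-1_events.csv', 'run-2_events.csv'])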
"""
# Check arguments
# Check imgs type
if events is not None:
_check_events_file_uses_tab_separators(events_files=events)
if not isinstance(run_imgs, (list, tuple)):
run_imgs = [run_imgs]
if design_matrices is None:
if events is None:
raise ValueError('events or design matrices must be provided')
if self.t_r is None:
raise ValueError('t_r not given to FirstLevelModel object'
' to compute design from events')
else:
design_matrices = _check_run_tables(run_imgs, design_matrices,
'design_matrices')
# Check that number of events and confound files match number of runs
# Also check that events and confound files can be loaded as DataFrame
if events is not None:
events = _check_run_tables(run_imgs, events, 'events')
if confounds is not None:
confounds = _check_run_tables(run_imgs, confounds, 'confounds')
# Learn the mask
if self.mask_img is False:
# We create a dummy mask to preserve functionality of api
ref_img = check_niimg(run_imgs[0])
self.mask_img = Nifti1Image(np.ones(ref_img.shape[:3]),
ref_img.affine)
if not isinstance(self.mask_img, NiftiMasker):
self.masker_ = NiftiMasker(mask_img=self.mask_img,
smoothing_fwhm=self.smoothing_fwhm,
target_affine=self.target_affine,
standardize=self.standardize,
mask_strategy='epi',
t_r=self.t_r,
memory=self.memory,
verbose=max(0, self.verbose - 2),
target_shape=self.target_shape,
memory_level=self.memory_level
)
self.masker_.fit(run_imgs[0])
else:
if self.mask_img.mask_img_ is None and self.masker_ is None:
self.masker_ = clone(self.mask_img)
for param_name in ['target_affine', 'target_shape',
'smoothing_fwhm', 't_r', 'memory',
'memory_level']:
our_param = getattr(self, param_name)
if our_param is None:
continue
if getattr(self.masker_, param_name) is not None:
warn('Parameter %s of the masker'
                             ' overridden' % param_name)
setattr(self.masker_, param_name, our_param)
self.masker_.fit(run_imgs[0])
else:
self.masker_ = self.mask_img
# For each run fit the model and keep only the regression results.
self.labels_, self.results_, self.design_matrices_ = [], [], []
n_runs = len(run_imgs)
t0 = time.time()
for run_idx, run_img in enumerate(run_imgs):
# Report progress
if self.verbose > 0:
percent = float(run_idx) / n_runs
percent = round(percent * 100, 2)
dt = time.time() - t0
# We use a max to avoid a division by zero
if run_idx == 0:
remaining = 'go take a coffee, a big one'
else:
remaining = (100. - percent) / max(0.01, percent) * dt
remaining = '%i seconds remaining' % remaining
sys.stderr.write(
"Computing run %d out of %d runs (%s)\n"
% (run_idx + 1, n_runs, remaining))
# Build the experimental design for the glm
run_img = check_niimg(run_img, ensure_ndim=4)
if design_matrices is None:
n_scans = get_data(run_img).shape[3]
if confounds is not None:
confounds_matrix = confounds[run_idx].values
if confounds_matrix.shape[0] != n_scans:
                        raise ValueError('Rows in confounds does not match '
'n_scans in run_img at index %d'
% (run_idx,))
confounds_names = confounds[run_idx].columns.tolist()
else:
confounds_matrix = None
confounds_names = None
start_time = self.slice_time_ref * self.t_r
end_time = (n_scans - 1 + self.slice_time_ref) * self.t_r
frame_times = np.linspace(start_time, end_time, n_scans)
design = make_first_level_design_matrix(frame_times,
events[run_idx],
self.hrf_model,
self.drift_model,
self.high_pass,
self.drift_order,
self.fir_delays,
confounds_matrix,
confounds_names,
self.min_onset
)
else:
design = design_matrices[run_idx]
self.design_matrices_.append(design)
# Mask and prepare data for GLM
if self.verbose > 1:
t_masking = time.time()
sys.stderr.write('Starting masker computation \r')
Y = self.masker_.transform(run_img)
del run_img # Delete unmasked image to save memory
if self.verbose > 1:
t_masking = time.time() - t_masking
sys.stderr.write('Masker took %d seconds \n'
% t_masking)
if self.signal_scaling:
Y, _ = mean_scaling(Y, self.scaling_axis)
if self.memory:
mem_glm = self.memory.cache(run_glm, ignore=['n_jobs'])
else:
mem_glm = run_glm
# compute GLM
if self.verbose > 1:
t_glm = time.time()
sys.stderr.write('Performing GLM computation\r')
labels, results = mem_glm(Y, design.values,
noise_model=self.noise_model,
bins=100, n_jobs=self.n_jobs)
if self.verbose > 1:
t_glm = time.time() - t_glm
sys.stderr.write('GLM took %d seconds \n' % t_glm)
self.labels_.append(labels)
# We save memory if inspecting model details is not necessary
if self.minimize_memory:
for key in results:
results[key] = SimpleRegressionResults(results[key])
self.results_.append(results)
del Y
# Report progress
if self.verbose > 0:
sys.stderr.write("\nComputation of %d runs done in %i seconds\n\n"
% (n_runs, time.time() - t0))
return self
def compute_contrast(self, contrast_def, stat_type=None,
output_type='z_score'):
"""Generate different outputs corresponding to
the contrasts provided e.g. z_map, t_map, effects and variance.
In multi-session case, outputs the fixed effects map.
Parameters
----------
contrast_def : str or array of shape (n_col) or list of (string or
array of shape (n_col))
where ``n_col`` is the number of columns of the design matrix,
(one array per run). If only one array is provided when there
are several runs, it will be assumed that the same contrast is
desired for all runs. The string can be a formula compatible with
`pandas.DataFrame.eval`. Basically one can use the name of the
conditions as they appear in the design matrix of the fitted model
            combined with the operators + and -, and combined with numbers
            using the operators +, -, * and /.
stat_type : {'t', 'F'}, optional
type of the contrast
output_type : str, optional
Type of the output map. Can be 'z_score', 'stat', 'p_value',
'effect_size', 'effect_variance' or 'all'
Returns
-------
output : Nifti1Image or dict
The desired output image(s). If ``output_type == 'all'``, then
the output is a dictionary of images, keyed by the type of image.
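        Examples
        --------
        Hypothetical calls on a fitted model; the condition names depend on
        the actual design matrix::
            z_map = model.compute_contrast('faces - houses')
            maps = model.compute_contrast('faces - houses', output_type='all')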
"""
if self.labels_ is None or self.results_ is None:
raise ValueError('The model has not been fit yet')
if isinstance(contrast_def, (np.ndarray, str)):
con_vals = [contrast_def]
elif isinstance(contrast_def, (list, tuple)):
con_vals = contrast_def
else:
raise ValueError('contrast_def must be an array or str or list of'
' (array or str)')
# Translate formulas to vectors
for cidx, (con, design_mat) in enumerate(zip(con_vals,
self.design_matrices_)
):
design_columns = design_mat.columns.tolist()
if isinstance(con, _basestring):
con_vals[cidx] = expression_to_contrast_vector(
con, design_columns)
n_runs = len(self.labels_)
if len(con_vals) != n_runs:
warn('One contrast given, assuming it for all %d runs' % n_runs)
con_vals = con_vals * n_runs
valid_types = ['z_score', 'stat', 'p_value', 'effect_size',
'effect_variance']
valid_types.append('all') # ensuring 'all' is the final entry.
if output_type not in valid_types:
raise ValueError(
'output_type must be one of {}'.format(valid_types))
contrast = _compute_fixed_effect_contrast(self.labels_, self.results_,
con_vals, stat_type)
output_types = (valid_types[:-1]
if output_type == 'all' else [output_type])
outputs = {}
for output_type_ in output_types:
estimate_ = getattr(contrast, output_type_)()
# Prepare the returned images
output = self.masker_.inverse_transform(estimate_)
contrast_name = str(con_vals)
output.header['descrip'] = (
'%s of contrast %s' % (output_type_, contrast_name))
outputs[output_type_] = output
return outputs if output_type == 'all' else output
def _get_voxelwise_model_attribute(self, attribute,
result_as_time_series):
"""Transform RegressionResults instances within a dictionary
(whose keys represent the autoregressive coefficient under the 'ar1'
noise model or only 0.0 under 'ols' noise_model and values are the
RegressionResults instances) into input nifti space.
Parameters
----------
attribute : str
an attribute of a RegressionResults instance.
possible values include: resid, norm_resid, predicted,
SSE, r_square, MSE.
result_as_time_series : bool
whether the RegressionResult attribute has a value
per timepoint of the input nifti image.
Returns
-------
output : list
a list of Nifti1Image(s)
"""
# check if valid attribute is being accessed.
all_attributes = dict(vars(RegressionResults)).keys()
possible_attributes = [prop
for prop in all_attributes
if '__' not in prop
]
if attribute not in possible_attributes:
msg = ("attribute must be one of: "
"{attr}".format(attr=possible_attributes)
)
raise ValueError(msg)
if self.minimize_memory:
raise ValueError(
'To access voxelwise attributes like '
'R-squared, residuals, and predictions, '
'the `FirstLevelModel`-object needs to store '
                'these attributes. '
'To do so, set `minimize_memory` to `False` '
'when initializing the `FirstLevelModel`-object.')
if self.labels_ is None or self.results_ is None:
raise ValueError('The model has not been fit yet')
output = []
for design_matrix, labels, results in zip(self.design_matrices_,
self.labels_,
self.results_
):
if result_as_time_series:
voxelwise_attribute = np.zeros((design_matrix.shape[0],
len(labels))
)
else:
voxelwise_attribute = np.zeros((1, len(labels)))
for label_ in results:
label_mask = labels == label_
voxelwise_attribute[:, label_mask] = getattr(results[label_],
attribute)
output.append(self.masker_.inverse_transform(voxelwise_attribute))
return output
@setattr_on_read
def residuals(self):
"""Transform voxelwise residuals to the same shape
as the input Nifti1Image(s)
Returns
-------
output : list
a list of Nifti1Image(s)
"""
return self._get_voxelwise_model_attribute('resid',
result_as_time_series=True)
@setattr_on_read
def predicted(self):
"""Transform voxelwise predicted values to the same shape
as the input Nifti1Image(s)
Returns
-------
output : list
a list of Nifti1Image(s)
"""
return self._get_voxelwise_model_attribute('predicted',
result_as_time_series=True)
@setattr_on_read
def r_square(self):
"""Transform voxelwise r-squared values to the same shape
as the input Nifti1Image(s)
Returns
-------
output : list
a list of Nifti1Image(s)
"""
return self._get_voxelwise_model_attribute('r_square',
result_as_time_series=False
)
@replace_parameters({'mask': 'mask_img'}, end_version='next')
def first_level_models_from_bids(dataset_path, task_label, space_label=None,
img_filters=None, t_r=None, slice_time_ref=0.,
hrf_model='glover', drift_model='cosine',
high_pass=.01, drift_order=1, fir_delays=[0],
min_onset=-24, mask_img=None,
target_affine=None, target_shape=None,
smoothing_fwhm=None, memory=Memory(None),
memory_level=1, standardize=False,
signal_scaling=0, noise_model='ar1',
verbose=0, n_jobs=1,
minimize_memory=True,
derivatives_folder='derivatives'):
"""Create FirstLevelModel objects and fit arguments from a BIDS dataset.
    If t_r is not specified this function will attempt to load it from a
bold.json file alongside slice_time_ref. Otherwise t_r and slice_time_ref
are taken as given.
Parameters
----------
dataset_path: str
Directory of the highest level folder of the BIDS dataset. Should
contain subject folders and a derivatives folder.
task_label: str
Task_label as specified in the file names like _task-<task_label>_.
space_label: str, optional
Specifies the space label of the preprocessed bold.nii images.
As they are specified in the file names like _space-<space_label>_.
img_filters: list of tuples (str, str), optional (default: None)
Filters are of the form (field, label). Only one filter per field
allowed. A file that does not match a filter will be discarded.
Possible filters are 'acq', 'ce', 'dir', 'rec', 'run', 'echo', 'res',
'den', and 'desc'. Filter examples would be ('desc', 'preproc'),
('dir', 'pa') and ('run', '10').
derivatives_folder: str, optional
derivatives and app folder path containing preprocessed files.
Like "derivatives/FMRIPREP". default is simply "derivatives".
All other parameters correspond to a `FirstLevelModel` object, which
contains their documentation. The subject label of the model will be
determined directly from the BIDS dataset.
Returns
-------
models: list of `FirstLevelModel` objects
Each FirstLevelModel object corresponds to a subject. All runs from
different sessions are considered together for the same subject to run
a fixed effects analysis on them.
models_run_imgs: list of list of Niimg-like objects,
Items for the FirstLevelModel fit function of their respective model.
models_events: list of list of pandas DataFrames,
Items for the FirstLevelModel fit function of their respective model.
models_confounds: list of list of pandas DataFrames or None,
Items for the FirstLevelModel fit function of their respective model.
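    Examples
    --------
    A hypothetical call; the dataset path, task label, space label and
    filters are placeholders::
        models, models_run_imgs, models_events, models_confounds = \
            first_level_models_from_bids(
                '/path/to/bids_dataset', task_label='stopsignal',
                space_label='MNI152NLin2009cAsym',
                img_filters=[('desc', 'preproc')])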
"""
# check arguments
img_filters = img_filters if img_filters else []
if not isinstance(dataset_path, str):
raise TypeError(
'dataset_path must be a string, instead %s was given' %
            type(dataset_path))
if not os.path.exists(dataset_path):
        raise ValueError('given path does not exist: %s' % dataset_path)
if not isinstance(task_label, str):
raise TypeError('task_label must be a string, instead %s was given' %
type(task_label))
if space_label is not None and not isinstance(space_label, str):
raise TypeError('space_label must be a string, instead %s was given' %
type(space_label))
if not isinstance(img_filters, list):
raise TypeError('img_filters must be a list, instead %s was given' %
type(img_filters))
for img_filter in img_filters:
if (not isinstance(img_filter[0], str)
or not isinstance(img_filter[1], str)):
raise TypeError('filters in img filters must be (str, str), '
'instead %s was given' % type(img_filter))
if img_filter[0] not in ['acq', 'ce', 'dir', 'rec', 'run',
'echo', 'desc', 'res', 'den',
]:
raise ValueError(
"field %s is not a possible filter. Only "
"'acq', 'ce', 'dir', 'rec', 'run', 'echo', "
"'desc', 'res', 'den' are allowed." % img_filter[0])
# check derivatives folder is present
derivatives_path = os.path.join(dataset_path, derivatives_folder)
if not os.path.exists(derivatives_path):
raise ValueError('derivatives folder does not exist in given dataset')
# Get acq specs for models. RepetitionTime and SliceTimingReference.
# Throw warning if no bold.json is found
if t_r is not None:
        warn('RepetitionTime given in model_init as %g' % t_r)
        warn('slice_time_ref is %g of the repetition '
             'time' % slice_time_ref)
else:
filters = [('task', task_label)]
for img_filter in img_filters:
if img_filter[0] in ['acq', 'rec', 'run']:
filters.append(img_filter)
img_specs = get_bids_files(derivatives_path, modality_folder='func',
file_tag='bold', file_type='json',
filters=filters)
        # If we don't find the parameter information in the derivatives folder
# we try to search in the raw data folder
if not img_specs:
img_specs = get_bids_files(dataset_path, modality_folder='func',
file_tag='bold', file_type='json',
filters=filters)
if not img_specs:
warn('No bold.json found in derivatives folder or '
'in dataset folder. t_r can not be inferred and will need to'
' be set manually in the list of models, otherwise their fit'
' will throw an exception')
else:
specs = json.load(open(img_specs[0], 'r'))
if 'RepetitionTime' in specs:
t_r = float(specs['RepetitionTime'])
else:
warn('RepetitionTime not found in file %s. t_r can not be '
'inferred and will need to be set manually in the '
'list of models. Otherwise their fit will throw an '
' exception' % img_specs[0])
if 'SliceTimingRef' in specs:
slice_time_ref = float(specs['SliceTimingRef'])
else:
warn('SliceTimingRef not found in file %s. It will be assumed'
' that the slice timing reference is 0.0 percent of the '
'repetition time. If it is not the case it will need to '
'be set manually in the generated list of models' %
img_specs[0])
# Infer subjects in dataset
sub_folders = glob.glob(os.path.join(derivatives_path, 'sub-*/'))
sub_labels = [os.path.basename(s[:-1]).split('-')[1] for s in sub_folders]
sub_labels = sorted(list(set(sub_labels)))
# Build fit_kwargs dictionaries to pass to their respective models fit
# Events and confounds files must match number of imgs (runs)
models = []
models_run_imgs = []
models_events = []
models_confounds = []
for sub_label in sub_labels:
# Create model
model = FirstLevelModel(
t_r=t_r, slice_time_ref=slice_time_ref, hrf_model=hrf_model,
drift_model=drift_model, high_pass=high_pass,
drift_order=drift_order, fir_delays=fir_delays,
min_onset=min_onset, mask_img=mask_img,
target_affine=target_affine, target_shape=target_shape,
smoothing_fwhm=smoothing_fwhm, memory=memory,
memory_level=memory_level, standardize=standardize,
signal_scaling=signal_scaling, noise_model=noise_model,
verbose=verbose, n_jobs=n_jobs,
minimize_memory=minimize_memory, subject_label=sub_label)
models.append(model)
# Get preprocessed imgs
if space_label is None:
filters = [('task', task_label)] + img_filters
else:
filters = [('task', task_label),
('space', space_label)] + img_filters
imgs = get_bids_files(derivatives_path, modality_folder='func',
file_tag='bold', file_type='nii*',
sub_label=sub_label, filters=filters)
# If there is more than one file for the same (ses, run), likely we
# have an issue of underspecification of filters.
run_check_list = []
# If more than one run is present the run field is mandatory in BIDS
# as well as the ses field if more than one session is present.
if len(imgs) > 1:
for img in imgs:
img_dict = parse_bids_filename(img)
if (
'_ses-' in img_dict['file_basename']
and '_run-' in img_dict['file_basename']
):
if (img_dict['ses'], img_dict['run']) in run_check_list:
raise ValueError(
'More than one nifti image found '
'for the same run %s and session %s. '
'Please verify that the '
'desc_label and space_label labels '
'corresponding to the BIDS spec '
'were correctly specified.' %
(img_dict['run'], img_dict['ses']))
else:
run_check_list.append((img_dict['ses'],
img_dict['run']))
elif '_ses-' in img_dict['file_basename']:
if img_dict['ses'] in run_check_list:
raise ValueError(
'More than one nifti image '
'found for the same ses %s, while '
'no additional run specification present'
'. Please verify that the desc_label and '
'space_label labels '
'corresponding to the BIDS spec '
'were correctly specified.' %
img_dict['ses'])
else:
run_check_list.append(img_dict['ses'])
elif '_run-' in img_dict['file_basename']:
if img_dict['run'] in run_check_list:
raise ValueError(
'More than one nifti image '
'found for the same run %s. '
'Please verify that the desc_label and '
'space_label labels '
'corresponding to the BIDS spec '
'were correctly specified.' %
img_dict['run'])
else:
run_check_list.append(img_dict['run'])
models_run_imgs.append(imgs)
# Get events and extra confounds
filters = [('task', task_label)]
for img_filter in img_filters:
if img_filter[0] in ['acq', 'rec', 'run']:
filters.append(img_filter)
# Get events files
events = get_bids_files(dataset_path, modality_folder='func',
file_tag='events', file_type='tsv',
sub_label=sub_label, filters=filters)
if events:
if len(events) != len(imgs):
raise ValueError('%d events.tsv files found for %d bold '
'files. Same number of event files as '
'the number of runs is expected' %
(len(events), len(imgs)))
events = [pd.read_csv(event, sep='\t', index_col=None)
for event in events]
models_events.append(events)
else:
raise ValueError('No events.tsv files found')
# Get confounds. If not found it will be assumed there are none.
# If there are confounds, they are assumed to be present for all runs.
confounds = get_bids_files(derivatives_path, modality_folder='func',
file_tag='desc-confounds_regressors',
file_type='tsv', sub_label=sub_label,
filters=filters)
if confounds:
if len(confounds) != len(imgs):
raise ValueError('%d confounds.tsv files found for %d bold '
'files. Same number of confound files as '
'the number of runs is expected' %
                                 (len(confounds), len(imgs)))
confounds = [pd.read_csv(c, sep='\t', index_col=None)
for c in confounds]
models_confounds.append(confounds)
return models, models_run_imgs, models_events, models_confounds
| bsd-3-clause |
XiaoxiaoLiu/morphology_analysis | bigneuron/image_profiling.py | 1 | 5911 | __author__ = 'coriannaj'
###
import os
import subprocess, threading
import pandas as pd
V3D = "/data/mat/xiaoxiaol/work/bin/bin_vaa3d_for_clusters/start_vaa3d.sh"
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def run(self, timeout):
def target():
#print 'Thread started'
self.process = subprocess.Popen(self.cmd, shell=True)
self.process.communicate()
#print 'Thread finished'
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print 'Terminating process'
print self.cmd
self.process.terminate()
thread.join()
#print self.process.returncode
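# Illustrative usage of Command (the shell command below is a placeholder):
#   cmd = Command("sleep 5")
#   cmd.run(timeout=2)   # the spawned process is terminated if it runs longer than 2 s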
subfolder="0401_gold163_all_soma_sort"
data_DIR="/data/mat/xiaoxiaol/data/big_neuron/silver/"+subfolder
def RUN_Vaa3d_Job(arguments):
# run in local python env
cmd = V3D + arguments
print cmd
command = Command(cmd)
command.run(timeout=60*10)
return
def profiling(input_img, input_swc, output_file, dilation_ratio = 3, flip = 0, invert = 0, cutoff_ratio=0.05, logfile=""):
arguments = " -x profiling -f profile_swc -i "+input_img+" "+input_swc+" -o "+output_file+" -p "+str(dilation_ratio)+" "+str(flip)+" "+str(invert)+" "+str(cutoff_ratio)+logfile
RUN_Vaa3d_Job(arguments)
return
def runGold(write_path, img_csv, overall_csv):
#create df table with headers
cols = ['image_id', 'CNR', 'SNR', 'dynamic_range', 'mean_fg', 'mean_bg', 'mean_tubularity']
overall_profile = pd.DataFrame(columns=cols)
print data_DIR
folder_num = 0
run_folders = 0
# go through all image directories in gold163
for dirName in os.listdir(data_DIR):
subfolder_path = os.path.join(data_DIR, dirName)
subfolder_write_path = os.path.join(write_path, os.path.basename(dirName))
swc = None
img = None
print "Directory: %s aka %s" %(dirName, subfolder_path)
if os.path.isdir(subfolder_path):
profile_path = os.path.join(subfolder_write_path, img_csv)
folder_num += 1
print "Is directory %d" %folder_num
for f in os.listdir(subfolder_path):
#checking all files in directory for img and trace
if f.endswith('strict.swc.out.swc'):
swc = os.path.join(subfolder_path, f)
print "found swc"
if f.endswith(('.v3dpbd','.v3draw')):
img = os.path.join(subfolder_path, f)
#formatting log file construction - add .log, join with path, then add command line argument ' > '
logfile = img_csv + '.log'
logfile = os.path.join(subfolder_write_path, logfile)
logfile = " > " + logfile
#create profile if all files necessary were found
if swc != None and img != None:
profiling(img, swc, profile_path, 3, 0, 0, 0.01, logfile)
print "Img: %s" % img
print "Swc: %s" % swc
print "Profile path: %s" %profile_path
print "Log File: %s" %logfile
run_folders += 1
#read in CSV output file
try:
profile_df = pd.read_csv(profile_path)
except IOError:
print "Folder %d did not create a profile.csv" %folder_num
else:
#if file exists get data
# take from row 2 because only want dendrite data, type=3
stats = [int(os.path.basename(dirName)), profile_df.at[2,'cnr'], profile_df.at[2,'snr'], profile_df.at[2,'dynamic_range'], profile_df.at[2,'fg_mean'], profile_df.at[2,'bg_mean'], profile_df.at[2,'tubularity_mean']]
print stats
overall_profile.loc[len(overall_profile)]=stats
print "exporting csv"
print "visited %d folders" %folder_num
print "ran on %d folders" %run_folders
print overall_profile
overall_profile.sort_values(by='image_id', inplace=True)
overall_profile.to_csv(os.path.join(write_path, overall_csv), index=False)
return
runGold('/data/mat/xiaoxiaol/data/big_neuron/silver/0401_gold163_all_soma_sort/', 'profile_strict.csv', 'radius_estimation_profiling-strict.csv')
#to add additional folders to data compilation
def runAddGold(data_dir, out_file, images):
out_path = os.path.join(data_DIR, out_file)
data_df = pd.read_csv(out_path)
for img_d in images:
        swc = None
        img = None
        data_path = os.path.join(data_dir, img_d)
        files = os.listdir(data_path)
for f in files:
profile_path = os.path.join(data_path, 'profile.csv')
if f.endswith('.swc'):
swc = os.path.join(data_path, f)
if f.endswith(('.v3dpbd', '.v3draw')):
img = os.path.join(data_path, f)
if swc != None and img != None:
profiling(img, swc, profile_path)
try:
profile_df = pd.read_csv(profile_path)
except IOError:
print "Folder %s did not create a profile.csv" %img_d
else:
# if file exists get data
# take from row 2 because only want dendrite data, type=3
stats = [158, img_d, profile_df.at[2, 'cnr'], profile_df.at[2, 'dynamic_range'],
profile_df.at[2, 'fg_mean'], profile_df.at[2, 'bg_mean'], profile_df.at[2, 'tubularity_mean']]
print stats
data_df.loc[len(data_df)] = stats
data_df.to_csv(out_file)
#server_data = '/data/mat/xiaoxiaol/data/big_neuron/silver/0401_gold163_all_soma_sort/'
#runAddGold(server_data, "/local1/home/coriannaj/Desktop/0401_gold163_all_soma_sort/img_profiling.csv", ["292", "293"])
| gpl-3.0 |
pitsios-s/SVHN-Thesis | src/multi_digit/digit_statistics.py | 1 | 2658 | import matplotlib.pyplot as plt
import numpy as np
import h5py
def load_multi_digit_labels(data_dir):
digit_structure_file = data_dir + "/digitStruct.mat"
file = h5py.File(digit_structure_file, 'r')
digit_structure_bbox = file['digitStruct']['bbox']
labels = []
for i in range(len(digit_structure_bbox)):
bb = digit_structure_bbox[i].item()
attr = file[bb]["label"]
if len(attr) > 1:
label = [file[attr.value[j].item()].value[0][0] for j in range(len(attr))]
else:
label = [attr.value[0][0]]
labels.append(label)
return labels
def calculate_digit_lengths(labels):
lengths = {}
for label in labels:
length = len(label)
if length not in lengths:
lengths[length] = 1
else:
lengths[length] += 1
return lengths
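# Illustrative example: calculate_digit_lengths([[1.0, 9.0], [5.0], [2.0, 3.0]])
# returns {2: 2, 1: 1}, i.e. two labels of length two and one single-digit label.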
def main():
train_labels_lengths = calculate_digit_lengths(load_multi_digit_labels("../../res/original/train"))
test_labels_lengths = calculate_digit_lengths(load_multi_digit_labels("../../res/original/test"))
extra_labels_lengths = calculate_digit_lengths(load_multi_digit_labels("../../res/original/extra"))
# Visualize results
plt.style.use("ggplot")
train_indexes = np.arange(len(train_labels_lengths)) + 1
test_indexes = np.arange(len(test_labels_lengths)) + 1
extra_indexes = np.arange(len(extra_labels_lengths)) + 1
fig, ax = plt.subplots(1, 3)
# Bar chart for train labels
ax[0].set_title("Digit Lengths for Train Dataset")
train_bar = ax[0].bar(train_indexes, train_labels_lengths.values(), color="blue")
ax[0].set_xticks(train_indexes)
for bar in train_bar:
height = bar.get_height()
ax[0].text(bar.get_x() + bar.get_width() / 2, 1.01 * height, str(int(height)), ha="center", va="bottom")
# Bar chart for test labels
ax[1].set_title("Digit Lengths for Test Dataset")
test_bar = ax[1].bar(test_indexes, test_labels_lengths.values(), color="blue")
ax[1].set_xticks(test_indexes)
for bar in test_bar:
height = bar.get_height()
ax[1].text(bar.get_x() + bar.get_width() / 2, 1.02 * height, str(int(height)), ha="center", va="bottom")
    # Bar chart for extra labels
ax[2].set_title("Digit Lengths for Extra Dataset")
extra_bar = ax[2].bar(extra_indexes, extra_labels_lengths.values(), color="blue")
ax[2].set_xticks(extra_indexes)
for bar in extra_bar:
height = bar.get_height()
ax[2].text(bar.get_x() + bar.get_width() / 2, 1.02 * height, str(int(height)), ha="center", va="bottom")
plt.show()
if __name__ == '__main__':
main()
| mit |
feilchenfeldt/enrichme | enrichme.py | 1 | 51923 | #!/usr/bin/env python
"""
TODO:
-- fix the gene summary mode so that it supports overlapping features
(necessary to implement max_dist argument)
DONE?
-- allow prior windowing of input data for better performance
-- ask magnus about applications
In top scores enrichment:
If data is on a grid: do not keep the values for each grid point,
but keep the top values and their distance.
Could this speed up things when shifting? But can be problematic
if real positions very clustered.
Speed up things:
Instead of searching features for each shift, make a big sparse matrix
(or a dictionary) that says for each snp with which features it is associated.
Uemit suggests using a bloom filter (a fast dictionary)!
-- Allow options to automatically run as many permutations as necessary
to get something:
A) significant
B) significant above multiple testing
Make the output column names generic, i.e., independent
of the input (score, feature, category, n_features, ...)!?
"""
import os, sys
import pandas as pd
import numpy as np
import gc, logging, time
import multiprocessing as mp
import pandas_util as hp
#import warnings
#warnings.simplefilter(action = "ignore", category = 'SettingWithCopyWarning')
#warnings.simplefilter("ignore")
#Treat the warnings expliticly in the future.
pd.options.mode.chained_assignment = None # default='warn'
eu = os.path.expanduser
jn = os.path.join
logger = logging.getLogger()
logging.basicConfig(format='%(levelname)-8s %(asctime)s %(message)s')
#logging.basicConfig(format='%(levelname)-8s %(asctime)s %(funcName)20s() %(message)s')
logger.setLevel(logging.DEBUG)
__all__ = ['enrichme']
__version__ = '0.1.1'
def get_sep(fn_fh):
try:
fn = fn_fh.name
except AttributeError:
fn = fn_fh
ext = os.path.splitext(fn)[-1]
if ext == ".tsv":
sep = "\t"
elif ext == ".csv":
sep = ","
else:
sep = None
logging.warning('Automatically inferring file seperator of {}. '
'Consider renaming your file to *.tsv or *.csv for speed.'.format(fn))
return sep
def init_rank_table(assoc):
try:
rt = pd.DataFrame({assoc.name:assoc.values,"rank":0,"out_of":0},index=assoc.index)
except Exception,e:
print assoc
raise e
rt.index.name = assoc.index.name
return rt
def shift_rod(rod_df, rnd, mode = "grid"):
"""
shift reference ordered data across the whole genome
Input:
    rod_df ... pandas dataframe or series with multiindex (chrom, pos)
modes ...
'grid' ... just rotate the index of the rod data frame
this means that the positions stay the same only the
value for each position becomes different
Faster, but means that you only hit the same grid-point
this should make it conservative on large grids. Large
grids are problematic if the fraction of top windows
considered becomes large
'continuous' ... add the random shift to each index value.
NOT IMPLEMENTED
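    Example (illustrative, made-up values)::
        s = pd.Series([1, 2, 3, 4],
                      index=pd.MultiIndex.from_tuples(
                          [('chr1', 10), ('chr1', 20), ('chr2', 5), ('chr2', 15)],
                          names=['chrom', 'pos']))
        shift_rod(s, 0.5).values   # -> array([3, 4, 1, 2])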
"""
if mode == "grid":
new_start_i = int(len(rod_df)*rnd)
rotate_data = np.concatenate((rod_df.iloc[new_start_i:].values,rod_df.iloc[:new_start_i].values))
if isinstance(rod_df,pd.core.series.Series):
r = pd.Series(rotate_data,index=rod_df.index)
return r
elif isinstance(rod_df,pd.core.frame.DataFrame):
r = pd.DataFrame(rotate_data,index=rod_df.index,columns=rod_df.columns)
return r
else:
        raise ValueError("Only mode 'grid' is supported.")
#parallel support
#from http://stackoverflow.com/questions/3288595/multiprocessing-using-pool-map-on-a-function-defined-in-a-class
def fun(f,q_in,q_out):
while True:
i,x = q_in.get()
if i is None:
break
q_out.put((i,f(x)))
def parmap(f, X, nprocs):
q_in = mp.Queue(1)
q_out = mp.Queue()
proc = [mp.Process(target=fun,args=(f,q_in,q_out)) for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i,x)) for i,x in enumerate(X)]
[q_in.put((None,None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i,x in sorted(res)]
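# Illustrative example: parmap(lambda x: x ** 2, range(4), nprocs=2) -> [0, 1, 4, 9];
# results are returned in input order even though they are computed in parallel.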
class CandidateEnrichment(object):
"""
Test enrichment of a candidate feature list
against a feature to category mapping.
In a common example, candidate_features will be genes
and categories will gene ontology (GO) categories.
Input:
candidate_features ... list of feature names to be tested
feature_to_category ... data frame with arbitrary index
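    Example (hypothetical data; default column names 'feature'/'category')::
        ftc = pd.DataFrame({'feature': ['g1', 'g2', 'g3', 'g4'],
                            'category': ['GO:1', 'GO:1', 'GO:2', 'GO:2']})
        enr = CandidateEnrichment(['g1', 'g2'], ftc)
        enr.permute(100)
        pvals = enr.get_pvals()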
"""
def __init__(self, candidate_features, feature_to_category, value_name='candidate',
feature_name='feature', category_name='category', feature_df=None, ncpus=1):
self.candidate_features = np.unique(candidate_features)
if len(self.candidate_features) < len(candidate_features):
logging.warning("There are duplicates in candidate_features, "
"going to be removed...")
self._bind_feature_to_category(feature_to_category, feature_name, category_name)
n_candidates = len(self.candidate_features)
self.all_candidate_features = self.feature_to_category[self.feature_name].unique()
self.candidate_features = np.intersect1d(self.candidate_features,
self.all_candidate_features, assume_unique=True)
assert len(self.candidate_features) > 0, ("No candidate feature found in the "
"first column of feature_to_category.")
if len(self.candidate_features) < n_candidates:
logging.warning("Only {} of the {} candidates are present "
"in feature_to_category mapping. "
"Only those are going to be used.".format(len(self.candidate_features), n_candidates))
self.init_rank_table = self.initital_rank_table()
self.rank_table = self.init_rank_table
self.feature_df = feature_df
self.ncpus = ncpus
self.value_name = value_name
def _bind_feature_to_category(self, feature_to_category, feature_name, category_name):
assert feature_name in feature_to_category.columns
assert category_name in feature_to_category.columns
self.feature_to_category = feature_to_category
self.feature_name = feature_name
self.category_name = category_name
def get_association(self, candidate_features):
"""
Get series with number of candidate candidate_features associated with each
category in feature_to_category.
"""
try:
assoc = self.feature_to_category.set_index(self.feature_name).ix[candidate_features].groupby(self.category_name).apply(len)
except IndexError, e:
raise e
assoc.name = "n_" + self.feature_name
assoc.index.name = self.category_name
return assoc
def initital_rank_table(self):
logging.debug("get real assoc")
real_assoc = self.get_association(self.candidate_features)
logging.debug("init rank table")
rt = init_rank_table(real_assoc)
return rt
def permuter(self, rank_table, n_permut, core=None):
"""
Update the supplied rank table (rt) with the
"""
rt = rank_table.copy()
if core is not None:
np.random.seed(int(time.time()*0.0001*(core+1)))
for i in xrange(n_permut):
permut_candidate_features = np.random.choice(self.all_candidate_features,
size=len(self.candidate_features), replace=False)
assoc = self.get_association(permut_candidate_features)
rt["rank"] += (rt["n_candidate_features"] > assoc.reindex(rt.index).fillna(0))
rt["out_of"] += 1
gc.collect() #needed for low memory profile
rt.sort('rank',ascending=False,inplace=True)
return rt
def get_permut_rank_table(self, n_permut):
rti = self.init_rank_table
if self.ncpus > 1:
logging.info("Spawning {} processes.".format(self.ncpus))
n_permut_proc = int(n_permut/self.ncpus)
rts = parmap(lambda core: self.permuter(rti, n_permut_proc, core), range(self.ncpus), self.ncpus)
rt = reduce_mem(rts)
else:
rt = self.permuter(rti, n_permut)
return rt
def permute(self, n_permut):
rt = self.get_permut_rank_table(n_permut)
self.rank_table = reduce_mem([self.rank_table, rt])
def get_pvals(self, pval_threshold=1, category_to_description=None):
return rank_to_pval(self.rank_table, self.feature_to_category, pval_threshold=1, category_to_description=category_to_description)
def create_info(self):
"""
Creates an attribute self.summary_per_feature.
For CandidateEnrichment or TopScoresEnrichment
this data frame contains a boolean variable for
each gene that is True if the gene is in the caniditate set.
For SummaryEnrichment this attribute gives the summary
for each gene.
"""
assert self.feature_df is not None, "Cannot create info without feature_df"
summary_per_feature = pd.DataFrame({self.value_name:self.feature_df[self.feature_name].\
apply(lambda x: x in self.candidate_features).values},
index=self.feature_df[self.feature_name].values)
summary_per_feature[self.value_name] = summary_per_feature[self.value_name].astype(int)
summary_per_feature.index.name = self.feature_name
summary_per_feature['zscore'] = (summary_per_feature[self.value_name]-summary_per_feature[self.value_name].mean())\
/summary_per_feature[self.value_name].std(ddof=0)
summary_per_feature.sort('zscore', ascending=False, inplace=True)
self.summary_per_feature = summary_per_feature
# def get_candidate_location(self):
# """
# for a list of gene ids,
# get a data frame with their.
# position
# """
# try:
# gi = self.feature_df[self.feature_df[self.feature_name].apply(lambda x: x in self.features)]
# except Exception, e:
# raise e
# return gi
class SummaryEnrichment(CandidateEnrichment):
"""
summary ... name of the function of the groupby object
to apply to the data (e.g. 'mean', 'max',...)
max_dist ... is not implemented yet!!!
two scenarios:
-- summary across features + summary across categories
-- summary across features -> take top features
(can be modelled by more complicated summary function, eg, lambda x: mean(x)>thresh)?
-> count features per cat (summary with sum)
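    Example (hypothetical objects: value_s is a (chrom, pos)-indexed series,
    feature_df has columns 'chrom', 'start', 'end', 'feature')::
        enr = SummaryEnrichment(value_s, feature_df, feature_to_category,
                                feature_summary='max', category_summary='mean')
        enr.permute(1000)
        pvals = enr.get_pvals()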
"""
def __init__(self, value_s, feature_df, feature_to_category, feature_name='feature', category_name='category',
feature_summary=None, feature_summary_fun=None,
category_summary=None, category_summary_fun=None, min_features_per_cat=2,
max_dist=0, chrom_len=None, ncpus=1):
#todo: logging for AssertionErrors, explain what the problem is
hp.check_rod(value_s)
hp.check_feature_df(feature_df)
assert feature_name in feature_df.columns
assert (feature_summary is not None) != (feature_summary_fun is not None), \
"Specify either feature_summary OR feature_summary_fun."
assert (category_summary is not None) != (category_summary_fun is not None), \
"Specify either category_summary OR category_summary_fun."
if chrom_len is not None:
self.flat_index = True
else:
self.flat_index = False
if self.flat_index:
self.value_s = hp.rod_to_1d(value_s, chrom_len)
self.feature_df = hp.feature_df_to_1d(feature_df, chrom_len)
else:
self.value_s = value_s.copy()
self.feature_df = feature_df.copy()
self.value_name = self.value_s.name
self._bind_feature_to_category(feature_to_category, feature_name, category_name)
self.prune_feature_to_category(min_features_per_cat)
self.feature_summary = feature_summary
self.category_summary = category_summary
self.feature_summary_fun = feature_summary_fun
self.category_summary_fun = category_summary_fun
self.max_dist = max_dist
#logging.debug("I am before.")
#import pdb
#logging.debug("I am here.")
#pdb.set_trace()
#logging.debug("I am after.")
self.init_rank_table = self.initital_rank_table()
self.rank_table = self.init_rank_table
self.ncpus = ncpus
def prune_feature_to_category(self, min_features_per_cat):
self.feature_to_category = self.feature_to_category.drop_duplicates()
ftc_features = self.feature_to_category[self.feature_name].unique()
features = self.feature_df[self.feature_name].unique()
not_in_ftc = np.setdiff1d(features, ftc_features, assume_unique=True)
not_in_feature_df = np.setdiff1d(ftc_features,features, assume_unique=True)
if len(not_in_ftc)>0:
logging.warning("{} features from the features file are not "
"in the feature_to_category mapping.".format(len(not_in_ftc)))
if len(not_in_feature_df)>0:
logging.warning("{} features from the feature_to_category mapping are not "
"in the features file. Removing them.".format(len(not_in_feature_df)))
self.feature_to_category = self.feature_to_category.set_index(self.feature_name).\
drop(pd.Index(not_in_feature_df)).reset_index()
if min_features_per_cat > 0:
n_cats = len(self.feature_to_category[self.category_name].unique())
logging.info("Removing categories for which there are less than {} "
"features in the features file.".format(min_features_per_cat))
self.feature_to_category = self.feature_to_category.groupby(self.category_name).\
filter(lambda x: len(x) >= min_features_per_cat)
n_removed = n_cats - len(self.feature_to_category[self.category_name].unique())
logging.info("{} categories removed.".format(n_removed))
self.feature_to_category.reset_index(inplace=True)
def initital_rank_table(self):
real_assoc = self.get_association(self.value_s)
rt = init_rank_table(real_assoc)
return rt
def get_association(self, value_s):
summary_per_feature = self.get_summary_per_feature(value_s)
assoc = self.get_summary_per_category(summary_per_feature)
assoc.name = self.value_name + '_' + 'summary'# self.feature_name + self.feature_summary + '_'
assoc.index.name = self.category_name
return assoc
def get_summary_per_feature(self,value_s):
if self.flat_index:
values_per_feature = hp.data_per_feature_FI(value_s, self.feature_df,
feature_name=self.feature_name)
else:
values_per_feature = hp.data_per_feature(value_s, self.feature_df,
feature_name=self.feature_name, max_dist=self.max_dist)
if self.feature_summary is not None:
groups = values_per_feature.groupby(self.feature_name)
return getattr(groups, self.feature_summary)()
elif self.feature_summary_fun is not None:
return self.feature_summary_fun(values_per_feature)
#summary_per_feature = hp.apply_to_feature(values_per_feature,
# groupby_func_name=self.feature_summary,
# function=self.feature_summary_fun)
#return summary_per_feature
def get_summary_per_category(self,value_per_feature):
"""
Calculates summary (e.g. mean) of values for the features
in each of the given categories.
Returns:
series
"""
value_to_category = self.feature_to_category.copy()
values_per_feature_to_category = value_per_feature.ix[value_to_category[self.feature_name].values].values
del value_to_category[self.feature_name]
value_to_category[self.value_name] = values_per_feature_to_category
if self.category_summary is not None:
groups = value_to_category.groupby(self.category_name)
return getattr(groups, self.category_summary)()[self.value_name]
elif self.category_summary_fun is not None:
return self.category_summary_fun(value_to_category)[self.value_name]
#summary_per_category = getattr(value_to_category.groupby('category'), self.category_summary)()
#return summary_per_category['value']
def permuter(self, rank_table, n_permut, core=None):
rank_table = rank_table.copy()
if core is not None:
np.random.seed(int(time.time()*0.0001*(core+1)))
#rnds = np.random.rand(n_permut)
#print core
#print rnds
for rnd in np.random.rand(n_permut):
s = shift_rod(self.value_s, rnd)
assoc = self.get_association(s)
rank_table["rank"] += (rank_table[assoc.name] > \
assoc.reindex(rank_table.index).fillna(0))
rank_table["out_of"] += 1
gc.collect() #needed for low memory profile
rank_table.sort('rank',ascending=False,inplace=True)
return rank_table
def create_info(self):
summary_per_feature = self.get_summary_per_feature(self.value_s)
#this is contained in the pvalue df
#summary_per_category = self.get_summary_per_category(summary_per_feature)
summary_per_feature['zscore'] = (summary_per_feature[self.value_name]-summary_per_feature[self.value_name].mean())\
/summary_per_feature[self.value_name].std(ddof=0)
summary_per_feature.sort('zscore', ascending=False, inplace=True)
#self.summary_per_category = summary_per_category
self.summary_per_feature = summary_per_feature
class TopScoresEnrichment(SummaryEnrichment):
"""
Test for enrichment by shifting the
input reference ordered scores.
First, we search for features
close to the top values of the reference
ordered input scores, and get associations
between features and categories.
Then, we repeat the following step many times:
Randomly shift the input scores,
against the (chrom, pos) index and
repeat the first step for the shifted data.
Repeating the second step many times,
gives a null distribution of
associations between features and categories,
    against which we compare the real associations
from step one.
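    Example (hypothetical objects; keeps the top 1% of scores and links them
    to features within 5 kb)::
        enr = TopScoresEnrichment(value_s, feature_df, feature_to_category,
                                  top_type='quantile', top=0.01, max_dist=5000)
        enr.permute(1000)
        pvals = enr.get_pvals()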
"""
def __init__(self,value_s, feature_df, feature_to_category, top_type, top,
feature_name='feature', category_name='category',
min_features_per_cat=2, ascending=False, max_dist=0, ncpus=1):
top_types = ['count','threshold','quantile']
assert feature_name in feature_df.columns
assert top_type in top_types, "top_type must be one of {}".format(top_types)
self.value_s = value_s.copy()
#the following prevents get_peaks from throwing unspecific error
self.feature_df = feature_df.drop_duplicates(subset=feature_name)
self._bind_feature_to_category(feature_to_category, feature_name, category_name)
self.value_name = self.value_s.name
self.prune_feature_to_category(min_features_per_cat)
if top_type == 'count':
top_n = top
elif top_type == 'quantile':
top_n = int(len(self.value_s)*top)
elif top_type == 'threshold':
value_s_s = value_s.sort(ascending=ascending, inplace=False)
if ascending:
top_n = np.argmax(value_s_s.values>top)
else:
top_n = np.argmax(value_s_s.values<top)
del value_s_s
self.top_n = top_n
self.ascending = ascending
self.max_dist = max_dist
self.init_rank_table = self.initital_rank_table()
self.rank_table = self.init_rank_table
self.ncpus = ncpus
def get_association(self, value_s):
value_s = value_s.sort(ascending=self.ascending, inplace=False)
top_s = value_s.iloc[:self.top_n]
del value_s
candidate_features = hp.get_features(top_s, self.feature_df, feature_name=self.feature_name,
max_dist=self.max_dist)
assoc = CandidateEnrichment.get_association(self, candidate_features)
#assoc = self.feature_to_category.set_index(self.feature_name).ix[cand_genes].groupby(self.category_name).apply(len)
#assoc.name = "n_" + self.feature_name
#assoc.index.name = self.category_name
return assoc
def get_peak_info(self,top_s,peaks_per_gene):
peak_height_name = top_s.name
gene_list_peak_pos = peaks_per_gene.reset_index([0])[self.feature_name].groupby(lambda x: x).apply(list)
gene_list_peak_pos.name = "genes"
gene_list_peak_pos.index = pd.MultiIndex.from_tuples(gene_list_peak_pos.index)
peak_info = pd.concat([top_s,gene_list_peak_pos],axis=1)
peak_info.sort(peak_height_name,ascending=False,inplace=True)
peak_info.index.names = ["chrom","pos"]
return peak_info
def create_info(self):
"""
Creates several info dataframe for the input data.
self.peaks_per_feature .... top peaks per feature for features that
are hit by top peaks
self.top_peaks ... top peaks with the features that are close
"""
value_s = self.value_s.sort(ascending=self.ascending, inplace=False)
top_s = value_s.iloc[:self.top_n]
self.candidate_features = hp.get_features(top_s, self.feature_df, feature_name=self.feature_name,
max_dist=self.max_dist)
sub_feature_df = self.feature_df.reset_index().set_index(self.feature_name).ix[self.candidate_features]\
.reset_index().set_index(['chrom','start'])
self.peaks_per_feature = get_peaks(sub_feature_df, top_s, self.max_dist, feature_name=self.feature_name)
features_sort_by_max = self.peaks_per_feature['peak_height'].groupby(lambda i:i[0]).max()
features_sort_by_max.sort(ascending=False,inplace=True)
self.peaks_per_feature = self.peaks_per_feature.ix[features_sort_by_max.index]
self.top_peaks = self.get_peak_info(top_s, self.peaks_per_feature)
#super(SummaryEnrichment, self).create_info()
CandidateEnrichment.create_info(self)
def get_peaks(sub_gene_df,top_s,max_dist,feature_name):
"""
For each gene in gene_info get the
peaks within max_dist in top_s. This
is basically reverse engineering to get
the peak info for each gene that was found
to be associated with a peak.
The reason for reverse engeneering rather than
storing this information when searching for the genes
for each peak is that we want to use precisely the same
function to search the genes for the real data and for the
permutations.
Input:
gene_info ... data frame with index ('chrom','start')
and columns 'gene_id' and 'end'
top_s ... series of peak positions with index (chrom, pos)
and values peak height
max_dist ... maximum distance between gene and peak
"""
gene_info = sub_gene_df
def get_dist(df,gene_pos):
"""
calculate distance
"""
s = pd.Series(df.index.droplevel(0).values - gene_pos.ix[df.index[0][0]],
index=df.index.droplevel(0).values)
return s
tot_gene_peaks_df = pd.DataFrame()
if not top_s.index.is_monotonic:
top_s = top_s.sortlevel([0,1])
if not gene_info.index.is_monotonic:
gene_info = gene_info.sort_index()
for chrom in gene_info.index.droplevel(1).unique():
loc_top_s = top_s.ix[chrom]
start = np.searchsorted(loc_top_s.index.values+max_dist,gene_info.ix[chrom].index.values)
end = np.searchsorted(loc_top_s.index.values-max_dist,gene_info.ix[chrom]["end"].values)
x = pd.concat([loc_top_s.iloc[st:ed] for st,ed in zip(start,end)],
keys=gene_info.ix[chrom][feature_name].values)
x.name = "peak_height"
dist_start = x.groupby(lambda i: i[0]).\
apply(lambda df: get_dist(df,
gene_info.ix[chrom].reset_index().set_index(feature_name)["start"]))
dist_start.name = "dist_start"
dist_end = x.groupby(lambda i: i[0]).\
apply(lambda df: get_dist(df,
gene_info.ix[chrom].set_index(feature_name)["end"]))
dist_end.name = "dist_end"
gene_peaks_df = pd.concat([x,dist_start,dist_end],axis=1)
gene_peaks_df.index = pd.MultiIndex.from_arrays([gene_peaks_df.index.droplevel(1),
[chrom]*len(x),
gene_peaks_df.index.droplevel(0)])
tot_gene_peaks_df = pd.concat([tot_gene_peaks_df, gene_peaks_df],axis=0)
tot_gene_peaks_df.index.names = [feature_name,"chrom","peak_pos"]
return tot_gene_peaks_df
def get_peak_info(top_s,peaks_per_gene):
peak_height_name = top_s.name
gene_list_peak_pos = peaks_per_gene.reset_index([0])["gene_id"].groupby(lambda x: x).apply(list)
gene_list_peak_pos.name = "genes"
gene_list_peak_pos.index = pd.MultiIndex.from_tuples(gene_list_peak_pos.index)
peak_info = pd.concat([top_s,gene_list_peak_pos],axis=1)
peak_info.sort(peak_height_name,ascending=False,inplace=True)
peak_info.index.names = ["chrom","pos"]
return peak_info
def get_p_val(rank_table):
"""
    Convert a rank table with columns 'rank' and 'out_of' into a sorted
    series of empirical p-values: p = 1 - rank / (out_of + 1).
"""
r = 1-rank_table["rank"]*1./(rank_table["out_of"]+1)
r.sort()
r.name = "p_value"
return r
def open_reduce_fns(fns):
permut_fhs = []
for fn in fns:
try:
if os.stat(fn).st_size>0:
permut_fhs.append(open(fn))
else:
logging.warning( "File seems to be empty. Skipping it: {}.".format(fn))
except Exception, e:
logging.warning("Can't open file. Skipping it: {}.".format(fn))
logging.warning(str(e))
return permut_fhs
def reduce_fhs(permut_fhs):
tot_rank = pd.read_csv(permut_fhs[0], index_col=0, sep=get_sep(permut_fhs[0])).dropna()
tot_rank["index"] = tot_rank.index
tot_rank.drop_duplicates(subset="index",inplace=True)
del tot_rank["index"]
for fh in permut_fhs[1:]:
rank_table = pd.read_csv(fh,index_col=0, sep=get_sep(fh)).dropna()
rank_table["index"] = rank_table.index
rank_table.drop_duplicates(subset="index",inplace=True)
try:
tot_rank["rank"] = tot_rank["rank"].add(rank_table["rank"],fill_value=0)
tot_rank["out_of"] = tot_rank["out_of"].add(rank_table["out_of"],fill_value=0)
except Exception, e:
raise e
return tot_rank
def reduce_mem(rank_tables):
tot_rank = rank_tables[0].dropna()
tot_rank["index"] = tot_rank.index
tot_rank.drop_duplicates(subset="index",inplace=True)
del tot_rank["index"]
for rank_table in rank_tables[1:]:
rank_table.dropna(inplace=True)
rank_table["index"] = rank_table.index
rank_table.drop_duplicates(subset="index",inplace=True)
try:
tot_rank["rank"] = tot_rank["rank"].add(rank_table["rank"],fill_value=0)
tot_rank["out_of"] = tot_rank["out_of"].add(rank_table["out_of"],fill_value=0)
except Exception, e:
raise e
return tot_rank
def rank_to_pval(rank_table, feature_to_category, pval_threshold=1, category_to_description=None):
try:
rank_table.drop('p_value', axis=1, inplace=True)
except ValueError:
pass
p_vals = get_p_val(rank_table)
p_val_df = rank_table.join(p_vals)
p_val_df.sort('p_value', inplace=True)
p_val_df["benjamini_hochberg"] = p_val_df["p_value"] * \
len(feature_to_category.iloc[:,1].unique())*1. /\
np.arange(1,len(p_val_df)+1)
p_val_df = p_val_df[p_val_df["p_value"]<=pval_threshold]
if category_to_description is not None:
#ctd_cols = parse_cols(reduce_args.category_to_description_cols)
#category_to_description = pd.read_csv(reduce_args.category_to_description,
# usecols=ctd_cols, sep='\t')
category_to_description = category_to_description.\
set_index(category_to_description.columns[0],drop=True)
try:
p_val_df.drop(category_to_description.columns[0], axis=1, inplace=True)
except ValueError:
pass
p_val_df = p_val_df.join(category_to_description)
#p_val_df.sort('p_value', inplace=True)
return p_val_df
def save_info(enrich, name):
enrich.create_info()
enrich.summary_per_feature.to_csv(name + ".summary_per_feature.tsv", sep='\t')
try:
enrich.summary_per_category.to_csv(name + ".summary_per_category.tsv", sep='\t')
except AttributeError:
pass
try:
enrich.peaks_per_feature.to_csv(name + ".peaks_per_feature.tsv", sep='\t')
enrich.top_peaks.to_csv(name + ".top_peaks.tsv", sep='\t')
except AttributeError:
pass
def enrichme():
import argparse, csv
#import pdb
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=
"Test enrichment of features (e.g. genes) in certain categories.",
add_help=False)
parser.add_argument("-R",'--run_type', choices=['Permute','Reduce'], default='Permute')
parser.add_argument('-N','--name', help='Base name for all the output files. ')
parser.add_argument("--feature_to_category", type=argparse.FileType('r'),
help="Filename for tsv that links features to"
" categories. E.g. go associations. ")
parser.add_argument('--feature_to_category_cols',nargs=2, default = ['feature', 'category'],
help="Column labels or positions (0-indexed) of 'feature' and 'category'"
"In the feature_to_category tsv."
"Expects 2 integers or strings.")
parser.add_argument('--category_to_description',
type=argparse.FileType('r'), required=False,
help='Tsv with category to category description mapping.')
parser.add_argument('--category_to_description_cols', nargs=2, default = ['category', 'description'],
help="Column labels or positions (0-indexed) of 'category' and 'description' "
"In the category_to_description tsv. "
"Expects 2 integers or strings.")
parser.add_argument('--logging_level','-l',
choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'],
default='INFO', help='Minimun level of logging.')
parser.add_argument("--help",'-h', action='store_true',
help="Print help message and exit.")
#Runtype parsers
void_parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
runtypeparsers = void_parser.add_subparsers(dest='run_type')
permuteparser = runtypeparsers.add_parser('Permute', description='Make random permutations or shifts of input '
' and save the ranks of the real assocations compared to permutations.')
permuteparser.add_argument('-M','--mode', choices=['Candidate', 'Summary', 'TopScores'], help='Enrichment method.')
permuteparser.add_argument('--n_permut', default='auto', help='Number of permutations or shifts.')
#'Use this option for multiple runs '
# 'that are reduced in mode --M Reduce '
# 'afterwards.')
permuteparser.add_argument('--noinfo',action='store_true', help='Do not output additional info files such as '
'summary values per gene and do not output pvalues. '
'Use this option if you are running multiple permute '
                                                'jobs that are reduced later.')
permuteparser.add_argument("--ncpus", '-nct',
type=int, default=mp.cpu_count(), help='Number of cpus to use in parallel.')
#permuteparser.add_argument('--reduce', action='store_true', help='Reduce on the fly. Use this option if you '
# 'just want to do a single job.')
reduceparser = runtypeparsers.add_parser('Reduce', description='Reduces the results from multiple Permute-runs '
' to a single table of enrichment pvalues.')
reduceparser.add_argument('--permuts', nargs='*',
help='Filenames of permutation results of individual runs. '
' (specified with --name in the permutation runs).')
reduceparser.add_argument('--remove_input', action='store_true',
help="Delete the input files given by --permuts .")
#reduceparser.add_argument('--pval_threshold', type=float, default=1,
# help='Do not report categories with p_value>pval_threshold. '
# '(Of course, they are still used to calculate the Benjamini-Hochberg '
# ' multiple testing correction.)')
#Mode parsers
void_parser1 = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
modeparsers = void_parser1.add_subparsers(dest='mode')
candidateparser = modeparsers.add_parser('Candidate', description='Test for enrichment of candidate features in feature categories.')
candidateparser.add_argument('--candidates', type=argparse.FileType('r'),
help='Sequence of candidate genes.')
    summaryparser = modeparsers.add_parser('Summary', description='Test for enrichment by summarising values across each feature and '
                                                                   'feature values across each category.')
topscoresparser = modeparsers.add_parser('TopScores', description='Test for enrichment of features that are close to top scores '
'in feature categories.')
for p in [summaryparser, topscoresparser]:
p.add_argument("--rod",type=argparse.FileType('r'),
help="Input reference ordered data tsv containing the values per position.")
p.add_argument("--rod_cols", type=str, nargs=3, default = ['chrom','pos','value'],
help="Column labels or positions (0-indexed) "
"of [chrom pos value]. "
"Expects 3 integers or strings.")
p.add_argument("--features", type=argparse.FileType('r'),
help="Filename of the feature info tsv "
" with columns 'chrom' 'start' 'end' 'feature'.")
p.add_argument('--feature_cols',nargs=4, default = ['chrom', 'start', 'end', 'feature'],
help="Column labels or positions (0-indexed) of ['chrom' 'start' 'end' 'feature']. "
"Expects 4 integers or strings. "
"E.g., if the file is a bed with the "
"feature name in the 4th column, use --feature_cols 0 1 2 3")
p.add_argument('--min_features_per_cat', type=int, default=2, help="Minimum number of features found "
"in the features file in order not to exclude "
"a category from the testing. If categories are "
"excluded beforehand you can speed up calculations "
"by setting this to 0.")
#p.add_argument("--peaks_per_gene", type=argparse.FileType('w'),
# help="File to write peak info for each gene.")
#p.add_argument("--top_peaks", type=argparse.FileType('w'),
# help="File to write top peak info.")
summaryparser.add_argument('--feature_summary', default='max', choices = ['mean', 'median', 'min', 'max', 'sum'],
help='Function to apply to values across feature to get a single '
'value per feature.')
summaryparser.add_argument('--category_summary', default='mean', choices = ['mean', 'median', 'min', 'max', 'sum'],
help='Function to apply to feature values across category to get a single '
'value per category.')
    summaryparser.add_argument('--chrom_len', type=argparse.FileType('r'), help="Tsv with columns 'chrom' and "
"'length'. Not required, but considerably "
"speeds up permutations.")
topscoresparser.add_argument('--top_type',choices=['count','threshold','quantile'],
help='How should the cutoff for top scores be determined?')
topscoresparser.add_argument('--top', type=float, help='Cutoff for top scores in units determined by top_type.')
g = topscoresparser.add_mutually_exclusive_group(required=False)
g.add_argument("--ascending",dest="ascending", action="store_true", help="Sort ascending, (e.g., for p-values).")
g.add_argument("--descending",dest="ascending", action="store_false", help="Sort descending, (e.g., for scores).")
g.set_defaults(ascending=False)
topscoresparser.add_argument('--max_dist', type=int, default=0,
help="Maximum distance (in bp) between feature and rod value "
"in order to consider the rod values for that feature.")
mode_classes = {'Candidate':CandidateEnrichment,'Summary':SummaryEnrichment,
'TopScores':TopScoresEnrichment}
def parse_cols(cols):
cols1 = []
for col in cols:
try:
cols1.append(int(col))
except ValueError:
cols1.append(col)
return cols1
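    # Illustrative note (added): column specifications arrive from the command line as strings,
    # so numeric positions are converted to ints while labels pass through unchanged,
    # e.g. parse_cols(['0', 'category']) -> [0, 'category'].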
args, unknown = parser.parse_known_args()
help_str = "\n"+parser.format_help()
if args.help:
logger.setLevel(logging.INFO)
if args.run_type == 'Permute':
permute_args, unknown = permuteparser.parse_known_args(unknown)
if permute_args.mode is not None:
modeparser = modeparsers.choices[permute_args.mode]
else:
permuteparser.error("argument --mode/-M is required")
mode_args, unknown = modeparser.parse_known_args(unknown)
help_str += "\n\n-----------Help for Permute step ------------\n\n" \
+ permuteparser.format_help()
help_str += "\n\n-----------Help for mode {} ------------\n\n".format(permute_args.mode) \
+ modeparser.format_help()
if args.run_type == 'Reduce':
reduce_args, unknown = reduceparser.parse_known_args(unknown)
help_str += "\n\n-----------Help for Reduce step ------------\n\n" \
+ reduceparser.format_help()
if args.help:
logging.info(help_str)
sys.exit(0)
if unknown:
logging.error("Unknown command line arguments: {}".format(unknown))
logging.info(help_str)
sys.exit(1)
if args.feature_to_category is None:
parser.error("argument --feature_to_category is required")
logger.setLevel(getattr(logging,args.logging_level))
logging.info("Loading feature to category mapping from {}.".format(args.feature_to_category.name))
feature_to_category_cols = parse_cols(args.feature_to_category_cols)
feature_to_category = pd.read_csv(args.feature_to_category,usecols=feature_to_category_cols,
sep=get_sep(args.feature_to_category.name))
feature_to_category = feature_to_category[feature_to_category_cols]
if args.category_to_description is not None:
logging.info("Loading category to description mapping from {}.".format(args.category_to_description.name))
ctd_cols = parse_cols(args.category_to_description_cols)
cat_to_desc = pd.read_csv(args.category_to_description,
usecols=ctd_cols,
quoting=csv.QUOTE_NONE,
sep=get_sep(args.category_to_description))
else:
cat_to_desc = None
#test whether name is writeable:
#we do not test all derived filenames, but this should be ok for most cases)
#try:
# open(args.name,'w')
#except Exception, e:
# logging.error("Cannot write to {}".format(args.name))
# raise
dir0 = os.path.dirname(os.path.realpath(args.name))
assert os.access(dir0, os.W_OK), \
"{} cannot be accessed for writing".format(dir0)
if args.run_type == 'Permute':
Enrichment = mode_classes[permute_args.mode]
mode_args = {arg:getattr(mode_args,arg) for arg in vars(mode_args)}
mode_args['ncpus'] = permute_args.ncpus
if permute_args.mode in ['Summary', 'TopScores']:
if mode_args['rod'] is None:
modeparsers.choices[permute_args.mode].error("argument --rod is required")
if mode_args['features'] is None:
modeparsers.choices[permute_args.mode].error("argument --features is required")
if permute_args.mode == 'Summary':
if mode_args['chrom_len'] is not None:
chrom_len = pd.read_csv(mode_args['chrom_len'],sep=get_sep(mode_args['chrom_len'].name), squeeze=True,index_col=0)
mode_args['chrom_len'] = chrom_len
logging.info("Loading features from {}.".format(mode_args['features'].name))
feature_cols = parse_cols(mode_args.pop('feature_cols'))
features_fh = mode_args.pop('features')
feature_df = pd.read_csv(features_fh, index_col=[0,1],
usecols=feature_cols, sep=get_sep(features_fh))
feature_df.index.set_names(['chrom','start'], inplace=True)
feature_df = feature_df[feature_cols[2:]]
feature_df.rename(columns={feature_cols[2]:'end'}, inplace=True)
if feature_df.columns[1] != feature_to_category.columns[0]:
logging.warning("Feature name in features and feature_to_category do not match. "
"They are {} and {}. Assuming that the feature identifiers in these "
"columns are the same and using the first name. "
"Make sure that these columns contain the same identifiers.".format(feature_df.columns[1],
feature_to_category.columns[0]))
feature_to_category.rename(columns={feature_to_category.columns[0]:feature_df.columns[1]}, inplace=True)
logging.info("Loading rod from {}.".format(mode_args['rod'].name))
rod_cols = parse_cols(mode_args.pop('rod_cols'))
rod_fh = mode_args.pop('rod')
rod_s = pd.read_csv(rod_fh, index_col=[0,1],
usecols=rod_cols, sep=get_sep(rod_fh), squeeze=True)#,
#header=False if type(rod_cols[0])==int else True)
rod_s.index.set_names(['chrom','pos'], inplace=True)
#remove inf values from input
rod_s.replace([np.inf, -np.inf], np.nan, inplace=True)
enrich = Enrichment(value_s=rod_s, feature_df=feature_df,
feature_to_category=feature_to_category, feature_name=feature_df.columns[1],
category_name=feature_to_category.columns[1], **mode_args)
elif permute_args.mode == 'Candidate':
if mode_args['candidates'] is None:
modeparsers.choices[permute_args.mode].error("argument --candidates is required")
candidate_features = []
for candidate in mode_args.pop('candidates'):
candidate_features.append(candidate.strip())
enrich = Enrichment(candidate_features,feature_to_category=feature_to_category,
feature_name=feature_to_category.columns[0],
category_name=feature_to_category.columns[1], **mode_args)
if not permute_args.noinfo:
save_info(enrich, args.name)
#enrich.create_info()
#enrich.summary_per_feature.to_csv(args.name+'.summary_per_feature.tsv',sep='\t')
#if permute_args.mode == 'TopScores':
# enrich.top_peaks.to_csv(args.name+'.top_peaks.tsv',sep='\t')
# enrich.peaks_per_gene.to_csv(args.name+'.peaks_per_gene.tsv',sep='\t')
if permute_args.n_permut == 'auto':
n_cats = len(enrich.feature_to_category[enrich.category_name].unique())
permute_args.n_permut = int(n_cats/0.02)
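            # Added note on the n_cats/0.02 heuristic: this requests 50*n_cats permutations in total,
            # so the smallest achievable empirical p-value (1/n_permut = 0.02/n_cats) lies below a
            # Bonferroni-style threshold of 0.05/n_cats.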
logging.info("Running {} permutations to get results informative "
"above multiple testing "
"for {} categories. Consider using more cores or "
"map/reduce implementation if this takes "
"too long.".format(permute_args.n_permut, n_cats))
else:
permute_args.n_permut = int(permute_args.n_permut)
if permute_args.n_permut>0:
start = time.time()
enrich.permute(permute_args.n_permut)
end = time.time()
delta = end - start
logging.info("{} permutations took {} seconds = {} minutes = {} hours.".format(permute_args.n_permut,
delta,delta/60.,delta/3600.))
if permute_args.noinfo:
enrich.rank_table.to_csv(args.name+'.ranktable.tsv', sep='\t', header=True)
else:
enrich.get_pvals(category_to_description=cat_to_desc).to_csv(args.name+'.pvals.tsv', sep='\t', header=True)
elif args.run_type == 'Reduce':
if reduce_args.permuts is None:
reduceparser.error("argument --permuts is required")
permut_fhs = open_reduce_fns(reduce_args.permuts)
tot_rank = reduce_fhs(permut_fhs)
p_val_df = rank_to_pval(tot_rank, feature_to_category, pval_threshold=1,
category_to_description=cat_to_desc)
p_val_df.index.name = 'category'
#make this as a function to also use it in a all-in-one run
try:
logging.info("Loading {}".format(args.name+".peaks_per_feature.tsv"))
cand_genes = np.unique(pd.read_csv(args.name+".peaks_per_feature.tsv",usecols=[0],sep='\t').values)
#CONTINUE HERE.....
gene_per_go_s = feature_to_category.set_index(feature_to_category_cols[0]).ix[cand_genes].groupby(feature_to_category_cols[1]).apply(lambda x: list(x.index))
gene_per_go_s.name = feature_to_category_cols[0]
p_val_df = pd.concat([p_val_df, gene_per_go_s], axis=1)
def check_len(el):
try:
return(len(el))
except TypeError:
return 0
#if not (p_val_df[feature_to_category_cols[0]].apply(check_len) == p_val_df["n_"+feature_to_category_cols[0]]).all():
# assert_df = p_val_df[["n_genes","genes"]]
# assert_df["len_genes"] = assert_df["genes"].apply(check_len)
# assert_df = assert_df[["n_genes","len_genes","genes"]]
# printv("Genes per category from",args.peaks_per_gene_fn,
# "inconsistent with n_genes reported in",permut_fhs[0].name,":",
# assert_df[assert_df["n_genes"]!=assert_df["len_genes"]])
        except IOError as e:
logging.warning("Not adding feature names to pvalue file, no peaks_per_feature file found.")
#raise e
#logging.info(str(e))
#try:
# p_val_df = p_val_df['n_genes']
p_val_df.sort('p_value',inplace=True)
p_val_df.to_csv(args.name+'_{}.pvals.tsv'.format(p_val_df['out_of'].max()), sep='\t')
if reduce_args.remove_input:
for fh in permut_fhs:
os.remove(fh.name)
if __name__ == "__main__":
enrichme()
| mit |
kajksa/pyrtl433 | rtl433/rtl433.py | 1 | 15213 | import json
import numpy as np
from . import crtl433
# http://stackoverflow.com/questions/17479944/partitioning-an-float-array-into-similar-segments-clustering
# http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-450-principles-of-digital-communications-i-fall-2006/lecture-notes/book_3.pdf
def lloyd_max(x, xq0, max_iter = 5):
"""Lloyd-Max algorithm"""
n = len(xq0)
xq = xq0 + 0
err_old = 1e10
iteration = 0
while True:
tq = (xq[:-1] + xq[1:])//2
# TODO: http://stackoverflow.com/questions/9444409/why-is-numpy-much-slower-than-matlab-on-a-digitize-example
b = np.digitize(x,tq)
err = np.var(x-xq[b])
print(err_old, err, iteration, xq)
if err_old==0 or np.abs(err-err_old)/err_old < 1e-3 or iteration>=max_iter:
return xq, b
err_old = err
for i in range(n): # Update levels
d = x[b == i]
if len(d)>0:
xq[i] = np.mean(d)
iteration += 1
def lloyd_max_bin(x, xq0, max_iter = 5):
"""Simplified Lloyd-Max for low, high data (two-levels)."""
assert len(xq0)==2
xq = xq0 + 0
N = len(x)
iteration = 0
b = np.zeros_like(x)
while True:
tq = (xq[0] + xq[1])//2
I = x>tq
b[:] = 0
b[I] = 1
if iteration>=max_iter:
return xq, b
s = np.sum(I)
if s!=0:
xq[1] = np.sum(x[I])//s
if s!=N:
xq[0] = np.sum(x[~I])//(N-s)
# TODO: when x[I] or x[~I] is empty? Should not happen when xq0 = [min, max], BUT expensive
# xq[0] = np.mean(x[~I])
# xq[1] = np.mean(x[I])
iteration += 1
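# Worked example (added for illustration): for x = np.array([10, 12, 200, 210, 11], dtype=np.uint16)
# and xq0 = np.array([0, 210], dtype=np.uint16) with max_iter=1 (as used in RFSignal._quantize),
# the threshold starts at (0 + 210)//2 = 105, giving b = [0, 0, 1, 1, 0], and the levels are
# re-centred to xq = [11, 205] (integer means of the low and high groups).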
def _calc_uint8_squares():
"""Lookup table of centered squared np.uint8 stored as np.uint16."""
s = np.empty(256, dtype=np.uint16)
for i in range(256):
s[i] = (127 - i) * (127 - i)
return s
# Lookup table
_uint8_squares = _calc_uint8_squares()
def square_uint8(d):
"""Find squares of centered uint8 data using look up table."""
assert d.dtype==np.uint8
return _uint8_squares[d[::2]] + _uint8_squares[d[1::2]]
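# Note (added): the raw RTL-SDR stream is interleaved unsigned 8-bit I/Q samples centred near 127,
# so square_uint8 returns the magnitude-squared I^2 + Q^2 of each complex sample via the table above
# (e.g. _uint8_squares[127] == 0 and _uint8_squares[0] == 127*127 == 16129).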
def pulse_gap_widths(b):
"""From a sequnce of levels 0 and 1 levels return widths of constant levels."""
#ispulse = np.concatenate(([b[0]], np.equal(b, 1).view(np.int8), [0 if b[-1]==1 else 1]))
ispulse = np.empty(len(b) + 2, dtype=np.int8)
ispulse[0] = b[0]
ispulse[1:-1] = b
ispulse[-1] = 0 if b[-1]==1 else 1
absdiff = np.abs(np.diff(ispulse))
ranges = np.concatenate(([0],np.where(absdiff == 1)[0])) # TODO: remove concatenate
widths = np.diff(ranges)
return widths
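# Example (added): pulse_gap_widths(np.array([1, 1, 0, 0, 0, 1])) returns array([2, 3, 1]):
# a run of two high samples, then three low samples, then a single high sample.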
def split_on_gaps(widths, start_value, threshold):
"""Split widths where widths[0] has start_value (0: low, gap, 1: high, pulse)."""
assert start_value in [0,1] # low, high
if start_value == 0:
gaps = widths[0::2]
pulses = widths[1::2]
else:
gaps = widths[1::2]
pulses = widths[0::2]
where_reset = np.where(gaps>threshold)[0]
pulsess = np.split(pulses,where_reset+start_value)
gapss = np.split(gaps,where_reset+1)
assert len(pulsess)==len(gapss)
return pulsess, gapss
def split_packet(pulses, gaps, reset_limit):
"""Simplified split_on_gaps, assuming starting on pulse and ending on gap."""
where_reset = np.where(gaps>reset_limit)[0]
pulsess = np.split(pulses,where_reset+1)
gapss = np.split(gaps,where_reset+1)
assert len(pulsess)==len(gapss)
return pulsess, gapss
# http://stackoverflow.com/questions/17479944/partitioning-an-float-array-into-similar-segments-clustering
def cluster_analyze(L, tolerance, max_clusters = 10):
"""Simple 1D cluster analysis."""
f = int(tolerance * 100) + 100 # 0.2 -> 120
if len(L)==0:
return [],0
Ls = np.sort(L) # Expensive
clusters = [0]
current = Ls[0]
for i, l in enumerate(Ls):
if l > (f * current) // 100: # L is int
            clusters.append(i)  # index at which the new cluster starts
current = l
if len(clusters) == max_clusters + 1:
break
num_clustered = i + 1 # Number of points clustered
    clustdata = np.split(Ls, clusters[1:])
if len(clusters) == max_clusters + 1:
num_clustered = i
clustdata = clustdata[:-1]
assert len(clustdata)<=max_clusters
return clustdata, num_clustered
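# Example (added): with a 20% tolerance, cluster_analyze(np.array([100, 105, 110, 300, 310]), 0.2)
# groups the sorted values into [100, 105, 110] and [300, 310] and reports 5 points clustered.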
def cluster_median(c):
"""Find median off sorted sequence c."""
Nc = len(c)
if Nc % 2 == 0:
m = Nc // 2
else:
m = (Nc - 1) // 2
return c[m]
def pack_bytes(bits_bool):
"""Pack array of bools as bytes (np.uint8)."""
bits_bool = bits_bool.astype(int)
bytes = np.packbits(bits_bool)
return bytes
class RFSignal:
def __init__(self, num_samples):
if num_samples % 2 != 0:
raise ValueError("Not even number of samples.")
self.num_samples = num_samples
self.num_bytes = self.num_samples
# TODO
# Numerator (b) and denominator (a) polynomials of the IIR filter
#b, a = scipy.signal.butter(1, 0.05) # Digital filter
b = [ 0.07295966, 0.07295966]
a = [ 1., -0.85408069] # Notice the minus! Needed in fixed point lowpass stuff
self.bi,self.ai = crtl433.lowpass_params(b,a)
#
# Pre allocate
#
# Squared signal
self.squared = np.empty(self.num_samples//2, dtype = np.uint16)
# Low pass filtered signal
self.signal = np.empty(self.num_samples//2, dtype = np.uint16)
def _square(self):
crtl433.square_uint8(self.data, self.squared)
def _lowpass(self):
crtl433.lowpass_uint16(self.bi, self.ai, self.squared, self.signal, state=np.array([self.squared[0], self.squared[0]]))
def _quantize(self):
# TODO! This is slow!
#xq0 = np.array([np.min(self.signal), np.max(self.signal)]) # How fast is this? Many reductions!
xq0 = np.array([0, np.max(self.signal)], dtype = np.uint16) # Faster and more robust
#self.levels, self.level_index = lloyd_max_bin(self.signal, xq0=xq0, max_iter=1)
self.levels, self.level_index = crtl433.lloyd_max_bin(self.signal, xq0=xq0, max_iter=1)
self.start_low = True if self.level_index[0]==0 else False
self.end_low = True if self.level_index[-1]==0 else False
def _pulses_gaps(self):
# Find widths of gaps and pulse
self.widths = crtl433.pulse_gap_widths(self.level_index)
# Always start on a pulse and end on a gap
if self.start_low:
self.widths = self.widths[1:]
if not self.end_low:
self.widths = self.widths[:-1]
assert len(self.widths) % 2 == 0
self.pulses = self.widths[0::2]
self.gaps = self.widths[1::2]
self.num_pulses = len(self.pulses)
def process(self, data):
self.data = data
assert len(data) == self.num_bytes
assert data.dtype == np.uint8
# Square data
self._square()
# Lowpass filter squared
self._lowpass()
# Quantize signal
self._quantize()
# Find pulses and gaps
self._pulses_gaps()
@property
def quantized(self):
return self.levels[self.level_index]
def analyze(self, max_clusters=16, tolerance=0.2, verbose=True):
periods = self.pulses + self.gaps
# Cluster widths of pulses, gaps and periods
cpulses, _ = cluster_analyze(self.pulses, tolerance = tolerance, max_clusters=max_clusters)
cgaps, _ = cluster_analyze(self.gaps, tolerance = tolerance, max_clusters=max_clusters)
cperiods, _ = cluster_analyze(periods, tolerance = tolerance, max_clusters=max_clusters)
if verbose:
print("Analyzing pulses...")
print("Total count: {}, width: {}".format(self.num_pulses, np.sum(periods)))
def _print(cc):
for clusterid,c in enumerate(cc):
data = {'clusterid': clusterid, 'count': len(c), 'width': cluster_median(c), 'min': c[0], 'max': c[-1]}
print("[{clusterid}] count: {count:>4}, width: {width} [{min};{max}]".format(**data))
print("Pulse width distribution:")
_print(cpulses)
print("Gap width distribution:")
_print(cgaps)
print("Pulse period distribution:")
_print(cperiods)
print()
print("Guessing modulation:")
if len(cpulses)==1:
print("Pulse Position Modulation with fixed pulse width")
elif len(cperiods)==2:
print("Pulse Width Modulation with fixed period")
else:
print("No clue...")
return cpulses, cgaps, cperiods
def bytes2str(bs):
"""Convert iterable of bytes to hex integer string for printing."""
ret = ""
for x in bs:
ret += "{:02x} ".format(x)
return ret[:-1]
def boolbit2str(boolbit):
"""Convert boolean array to bit string for printing."""
ret = ""
zero = "0"
one = "1"
for i in range(len(boolbit)//8):
byte = boolbit[i*8:(i+1)*8]
for b in byte:
if b:
ret += one
else:
ret += zero
ret += " "
byte = boolbit[8*(len(boolbit)//8):]
for b in byte:
if b:
ret += one
else:
ret += zero
return ret
#
# DEMODULATE
#
# Working with nibbles
#define HI_NIBBLE(b) (((b) >> 4) & 0x0F)
#define LO_NIBBLE(b) ((b) & 0x0F)
# https://wiki.python.org/moin/BitManipulation
def manchester(bits):
"""Manchester decoding."""
    if len(bits) % 2 != 0 or np.any(bits[::2]==bits[1::2]):
raise ValueError("Not valid data for Manchester decoding.")
return bits[::2]
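# Example (added): manchester(np.array([True, False, True, False, False, True])) returns
# array([ True,  True, False]) -- each (1, 0) pair decodes to 1 and each (0, 1) pair to 0,
# i.e. the first bit of every pair is kept.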
class Demodulate:
def _print(self, i, bytes, boolbit):
# Print
sb = bytes2str(bytes)
sbb = boolbit2str(boolbit)
str_pulse_analyze = "[{:02}] {{{:02}}} : {}: {}".format(i, len(boolbit), sb, sbb)
if len(str_pulse_analyze)>80:
s = str_pulse_analyze[:76] + " ..."
assert len(s)==80
print(s)
else:
print(str_pulse_analyze)
def _print_data(self, i, data):
if data:
json_data = json.dumps(data)
print("[{:02}] : {}".format(i, json_data))
def print(self):
for i, (boolbit, bytes, pdata) in enumerate(zip(self.boolbits, self.bytess, self.data)):
if not np.all(boolbit==False) and len(boolbit)>2:
self._print(i, bytes, boolbit)
self._print_data(i, pdata)
# if not np.all(boolbit==False):
# self._print(i, bytes, boolbit)
# self._print_data(i, pdata)
def _split_packet(self, rf):
pulsess, gapss = split_packet(rf.pulses, rf.gaps, self.reset_limit)
return pulsess, gapss
class ChuangoDemodulate(Demodulate):
def __init__(self):
self.name = "Chuango"
self.reset_limit = 3800
self.short_limit = 200
# Zeros
#self.short_pulse_minlen = 120
self.short_pulse_maxlen = 180
# Ones
self.long_pulse_minlen = 380
#self.pulse_maxlen = 160
def __call__(self, rf):
self.boolbits = []
self.bytess = []
self.data = []
# Pulses/gaps to bool bits
pulsess, gapss = self._split_packet(rf)
for pulses,gaps in zip(pulsess, gapss):
if len(pulses)>0:
# Some demodulation! Converting some series of pulses and gaps to raw bytes
#boolbit = pulses<200 # More robust?
#self.boolbits.append(boolbit)
boolbit = (pulses<self.short_pulse_maxlen)
boolbit = ~boolbit # Long pulses are 1's
boolbit2 = (pulses>self.long_pulse_minlen)
if np.all(boolbit==boolbit2):
self.boolbits.append(boolbit)
# else:
# print(boolbit, boolbit2)
# Bool bits to bytes
for b in self.boolbits:
self.bytess.append(pack_bytes(b))
# Decode bytes to data
for i, (boolbit, bytes) in enumerate(zip(self.boolbits, self.bytess)):
pdata = {}
# 25 bits, always ending on a short pulse, and not all device id bits equal
if len(boolbit)==25 and not boolbit[-1] and not np.all(boolbit[0]==boolbit[0:20]):
pdata["device_id"] = int((bytes[0] << 12) | (bytes[1] << 4) | (bytes[2] >> 4))
pdata["cmd_id"] = int(bytes[2] & 0x0F)
pdata["product"] = self.name
self.data.append(pdata)
return self.data
class ProoveDemodulate(Demodulate):
def __init__(self):
self.name = "Proove"
self.reset_limit = 2400
self.short_limit = 100
def __call__(self, rf):
self.boolbits = []
self.bytess = []
self.data = []
# Pulses/gaps to bool bits
pulsess, gapss = self._split_packet(rf)
for pulses,gaps in zip(pulsess, gapss):
if len(pulses)>0:
# Some demodulation! Converting some series of pulses and gaps to raw bytes
boolbit = gaps<self.short_limit
try:
boolbit = manchester(boolbit[1:-1])
self.boolbits.append(boolbit)
except ValueError:
continue
# Bool bits to bytes
for b in self.boolbits:
self.bytess.append(pack_bytes(b))
# Decode bytes to data
for i, (boolbit, bytes) in enumerate(zip(self.boolbits, self.bytess)):
pdata = {}
if len(boolbit)==32:
pdata["id"] = int((bytes[0] << 18) | (bytes[1] << 10) | (bytes[2] << 2) | (bytes[3]>>6)) # ID 26 bits
pdata["group"] = int((bytes[3] >> 5) & 1)
pdata["state"] = "OFF" if ((bytes[3] >> 4) & 1)==1 else "ON"
pdata["channel"] = int((bytes[3] >> 2) & 0x03)
pdata["unit"] = int((bytes[3] & 0x03))
pdata["product"] = self.name
self.data.append(pdata)
return self.data
# class OregonDemodulate(Demodulate):
# def __init__(self):
# self.reset_limit = 2400
# self.short_limit = 130
if __name__ == "__main__":
import matplotlib.pylab as plt
N = 100
x = np.zeros(N)
x[1:5] = 1
x[10:20] = 1
x[40:50] = 1
x[55:56] = 1.2
x[90:100] = 1
x = x + 0.2*(np.random.random(N)-0.5)
# Convert signal to uint8
x = x * 255
x[x>255] = 255
x[x<0] = 0
x = np.asarray(x, dtype=np.uint8)
xq0 = np.array([10,11], dtype=np.uint8)
#xq0 = None
max_iter = 10
xq, b = lloyd_max_bin(x,xq0=xq0, max_iter=max_iter)
print("Result: levels = ", xq)
print("Result: bits = ", b)
plt.figure("result")
plt.plot(x)
plt.plot(xq[b], "o")
plt.show()
| gpl-3.0 |
briney/abtools | abtools/_finder.py | 1 | 25595 | #!/usr/bin/python
# filename: _finder.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
import multiprocessing as mp
import platform
import os
import subprocess as sp
import sys
import tempfile
from threading import Thread
import time
import numpy as np
import pandas as pd
from pymongo import MongoClient
from Bio import SeqIO
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from abtools import color, log, mongodb
from abtools.utils import progbar
def parse_args():
import argparse
parser = argparse.ArgumentParser("For a MongoDB collection, plots the germline divergence against the sequence identity to a given 'subject' sequence.")
parser.add_argument('-d', '--database', dest='db', required=True,
help="Name of the MongoDB database to query. Required.")
parser.add_argument('-c', '--collection', dest='collection', default=None,
help="Name of the MongoDB collection to query. \
If not provided, all collections in the given database will be processed iteratively.")
parser.add_argument('--collection-prefix', dest='collection_prefix', default=None,
help="If supplied, will iteratively process only collections beginning with <collection_prefix>.")
parser.add_argument('-o', '--output', dest='output_dir', default=None,
help="Output directory figure files. If not provided, figures will not be generated. \
Directory will be created if it does not already exist.")
parser.add_argument('-t', '--temp', dest='temp_dir', required=True,
help="Directory for temporary storage. \
Will be created if it does not already exist. Required.")
parser.add_argument('-l', '--log', dest='log', default=None,
help="The log file, to which the blast_parse log info will be written. \
Default is <output>/abfinder.log.")
parser.add_argument('-C', '--cluster', dest="cluster", default=False, action='store_true',
help="Use if performing computation on a Celery cluster. \
If set, input files will be split into many subfiles and passed to a Celery queue. \
If not set, input files will still be split, \
but will be distributed to local processors using multiprocessing.")
parser.add_argument('-i', '--ip', dest='ip', default='localhost',
help="The IP address for the MongoDB server. \
Defaults to 'localhost'.")
parser.add_argument('--port', dest='port', default=27017,
help="The port for the MongoDB server. Defaults to '27017'.")
parser.add_argument('-u', '--user', dest='user', default=None,
help="Username for the MongoDB server. Not used if not provided.")
parser.add_argument('-p', '--password', dest='password', default=None,
help="Password for the MongoDB server. Not used if not provided.")
parser.add_argument('-s', '--standard', dest='standard', required=True,
help='Path to a file containing the standard sequence(s) for which \
identity/divergence will be calculated, in FASTA format. \
All sequences in the standard file will iteratively processed. Required')
parser.add_argument('-q', '--chain', dest='chain', default='heavy',
choices=['heavy', 'kappa', 'lambda', 'light'],
help="The chain type of the subject sequence. \
Options are 'heavy', 'kappa', 'lambda' and 'light'. \
Default is 'heavy'.")
parser.add_argument('-n', '--no_update', dest='update', action='store_false', default=True,
help="Does not update the MongoDB with AbFinder info. \
Can save some time if the identity calculations aren't needed again.")
parser.add_argument('--no_figure', dest='make_figure', action='store_false', default=True,
help="Does not make the identity/divergence figure. \
Useful if you don't want the figure, just the identity info written to the database.")
parser.add_argument('--single-process-update', dest='single_process_update', action='store_true', default=False,
help="Perform the MongoDB update using a single process (without multiprocessing).")
parser.add_argument('--update-threads', dest='update_threads', type=int, default=25,
help="Number of threads to use when update the MongoDB database. Default is 25.")
parser.add_argument('-N', '--nucleotide', dest='is_aa', action='store_false', default=True,
help="Use nucleotide sequences for alignment. Default is amino acid sequences. \
Ensure standard format matches.")
parser.add_argument('-x', '--xmin', dest='x_min', type=int, default=-1,
help="Minimum X-axis (germline divergence) value for the AbCompare plot. Default is -1.")
parser.add_argument('-X', '--xmax', dest='x_max', type=int, default=35,
help="Maximum X-axis (germline divergence) value for the AbCompare plot. Default is 35.")
parser.add_argument('-y', '--ymin', dest='y_min', type=int, default=65,
help="Minimum Y-axis (mAb identity) value for the AbCompare plot. Default is 65.")
parser.add_argument('-Y', '--ymax', dest='y_max', type=int, default=101,
help="Maximum Y-axis (mAb identity) value for the AbCompare plot. Default is 101.")
parser.add_argument('-g', '--gridsize', dest='gridsize', type=int, default=0,
help="Gridsize for the AbFinder hexbin plot. \
Default is 36 for amino acid sequences and 50 for nucleotide sequences.")
parser.add_argument('--colormap', dest='colormap', default='Blues',
help="Colormap to be used in the AbFinder hexbin plots. \
Can accept a matplotlib cmap or the name of one of matplotlib's builtin cmaps. \
Default is 'Blues'.")
parser.add_argument('--mincount', dest='mincount', default=3, type=int,
help="Minimum number of sequences in a hexbin for that hexbin to be colored. \
Default is 3.")
parser.add_argument('--skip-padding', dest='remove_padding', default=True, action='store_false',
help="If set, will not remove padding field from MongoDB.")
parser.add_argument('-D', '--debug', dest="debug", action='store_true', default=False,
help="If set, will write all failed/exception sequences to file \
and should give more informative errors.")
return parser
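# Example command line (hypothetical paths and names; a minimal sketch):
#   python _finder.py -d my_ngs_db -c donor1_heavy -o /data/abfinder/figures \
#       -t /data/abfinder/tmp -s /data/mabs/standards.fasta -q heavy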
class Args(object):
def __init__(self, db=None, collection=None,
output=None, temp=None, log=None, cluster=False,
ip='localhost', port=27017, user=None, password=None, update=True,
standard=None, chain='heavy', is_aa=True,
x_min=-1, x_max=35, y_min=65, y_max=101, gridsize=0, mincount=3,
colormap='Blues', debug=False):
super(Args, self).__init__()
if not all([db, output, temp, standard]):
err = 'You must provide a MongoDB database name, output and temp directories, \
and a file containing one or more comparison (standard) sequences in FASTA format.'
raise RuntimeError(err)
self.db = db
self.collection = collection
self.output_dir = output
self.temp_dir = temp
self.log = log
self.cluster = bool(cluster)
self.ip = ip
self.port = int(port)
self.user = user
self.password = password
self.standard = standard
if chain not in ['heavy', 'kappa', 'lambda', 'light']:
err = 'Please select an appropriate chain. \
Valid choices are: heavy, light, kappa and lambda.'
raise RuntimeError(err)
self.chain = chain
self.update = bool(update)
self.is_aa = bool(is_aa)
self.x_min = int(x_min)
self.x_max = int(x_max)
self.y_min = int(y_min)
self.y_max = int(y_max)
self.gridsize = int(gridsize)
        self.mincount = int(mincount)
self.colormap = colormap
self.debug = bool(debug)
# ================================================
#
# FILES AND DIRECTORIES
#
# ================================================
def make_directories(args):
for d in [args.output_dir, args.temp_dir]:
if d:
_make_direc(d, args)
def _make_direc(d, args):
if not os.path.exists(d):
os.makedirs(d)
if args.cluster:
cmd = 'sudo chmod 777 {}'.format(d)
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
def get_standards(args):
standards = []
for s in SeqIO.parse(open(args.standard, 'r'), 'fasta'):
standards.append(s)
return standards
def get_chain(args):
if args.chain == 'light':
return ['kappa', 'lambda']
return [args.chain, ]
def get_sequences(db, collection, temp_dir, args):
files = []
fastas = []
chunksize = 1000
seq_counter = 0
total_seq_counter = 0
query_results = query(db, collection, args)
iden_field = 'aa_identity' if args.is_aa else 'nt_identity'
vdj_field = 'vdj_aa' if args.is_aa else 'vdj_nt'
for seq in query_results:
fastas.append('>{}_{}\n{}'.format(seq['seq_id'], seq[iden_field]['v'], seq[vdj_field]))
seq_counter += 1
total_seq_counter += 1
if seq_counter == chunksize:
files.append(write_to_temp_file(fastas, temp_dir))
fastas = []
seq_counter = 0
if fastas:
files.append(write_to_temp_file(fastas, temp_dir))
return files
def write_to_temp_file(fastas, temp_dir):
tfile = tempfile.NamedTemporaryFile(dir=temp_dir, delete=False)
tfile.write('\n'.join(fastas))
tfile.close()
return tfile.name
def clean_up(files):
for f in files:
os.unlink(f)
# ================================================
#
# MONGO
#
# ================================================
def query(db, collection, args):
coll = db[collection]
chain = get_chain(args)
mongodb.index(db, collection, ['chain'])
print_query_info()
iden_field = 'aa_identity.v' if args.is_aa else 'nt_identity.v'
vdj_field = 'vdj_aa' if args.is_aa else 'vdj_nt'
return coll.find({'chain': {'$in': chain}, 'prod': 'yes'}, {'_id': 0, 'seq_id': 1, iden_field: 1, vdj_field: 1})
def chunker(l, n):
'Generator that produces n-length chunks from iterable l.'
for i in xrange(0, len(l), n):
yield l[i:i + n]
def update_db(db, standard, scores, collection, args):
db = mongodb.get_db(args.db, args.ip, args.port, args.user, args.password)
print_index_info()
mongodb.index(db, collection, ['seq_id'])
print_update_info()
start = time.time()
conn = mongodb.get_connection(args.ip, args.port,
args.user, args.password)
mongo_version = conn.server_info()['version']
standard = standard.replace('.', '_')
g = scores.groupby('identity')
groups = regroup(g.groups)
for g in range(0, len(groups), args.update_threads):
tlist = []
for group in groups[g:g + args.update_threads]:
t = Thread(target=update, args=(db, collection, group, standard, mongo_version, args))
t.start()
tlist.append(t)
for t in tlist:
t.join()
progbar.progress_bar(g + args.update_threads, len(groups))
# if platform.system().lower() == 'darwin' or args.debug or args.single_process_update:
# for i, group in enumerate(groups):
# update(db, collection, group, standard, mongo_version, args)
# progbar.progress_bar(i, len(groups))
# else:
# p = mp.Pool(processes=25)
# async_results = []
# for group in groups:
# async_results.append(p.apply_async(update, args=(db, collection, group, standard, mongo_version, args)))
# monitor_update(async_results)
# p.close()
# p.join()
print('')
run_time = time.time() - start
logger.info('Updating took {} seconds. ({} sequences per second)'.format(round(run_time, 2),
round(len(scores) / run_time, 1)))
def update(db, collection, data, standard, version, args):
db = mongodb.get_db(args.db, args.ip, args.port, args.user, args.password)
coll = db[collection]
score = data[0]
ids = data[1]
mab_id_field = 'mab_identity_aa' if args.is_aa else 'mab_identity_nt'
if int(version.split('.')[0]) < 3:
result = coll.update({'seq_id': {'$in': ids}},
{'$set': {'{}.{}'.format(mab_id_field, standard.lower()): float(score)}},
multi=True)
else:
result = coll.update_many({'seq_id': {'$in': ids}},
{'$set': {'{}.{}'.format(mab_id_field, standard.lower()): float(score)}})
if args.debug:
print('matched: {}'.format(result.matched_count))
print('modified: {}'.format(result.modified_count))
def monitor_update(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
finished = len([r for r in results if r.ready()])
progbar.progress_bar(finished, jobs)
progbar.progress_bar(finished, jobs)
def regroup(oldgs):
newgs = []
for og in oldgs:
if len(oldgs[og]) <= 500:
newgs.append((og, oldgs[og]))
else:
for ng in chunker(oldgs[og], 500):
newgs.append((og, ng))
return newgs
# ================================================
#
# FIGURES
#
# ================================================
def make_figure(standard_id, scores, collection, args):
print_fig_info()
sns.set_style('white')
fig_file = os.path.join(args.output_dir, '{0}_{1}_{2}.pdf'.format(args.db, collection, standard_id))
x = list(scores['germ_divergence'].values)
y = list(scores['identity'].values)
xy_vals = zip(x, y)
trunc_xy_vals = [v for v in xy_vals if v[0] <= args.x_max and v[1] >= args.y_min]
x = [v[0] for v in trunc_xy_vals]
y = [v[1] for v in trunc_xy_vals]
# To make sure the gridsize is correct (since it's based on the actual values)
# I need to add a single value near the max and min of each axis.
# They're added just outside the visible plot, so there's no effect on the plot.
x.extend([args.x_min - 1, args.x_max + 1])
y.extend([args.y_min - 1, args.y_max + 1])
# plot params
cmap = color.get_cmap(args.colormap)
plt.subplots_adjust(hspace=0.95)
plt.subplot(111)
    plt.hexbin(x, y, bins='log', cmap=cmap, mincnt=args.mincount, gridsize=set_gridsize(args))
plt.title(standard_id, fontsize=18)
# set and label axes
plt.axis([args.x_min, args.x_max, args.y_min, args.y_max])
plt.xlabel('Germline divergence')
plt.ylabel('{0} identity'.format(standard_id))
# make and label the colorbar
cb = plt.colorbar()
cb.set_label('Sequence count (log10)', labelpad=10)
# save figure and close
plt.savefig(fig_file)
plt.close()
def set_gridsize(args):
if args.gridsize:
return args.gridsize
elif args.is_aa:
return 36
return 50
# ================================================
#
# PRINTING
#
# ================================================
def print_abfinder_start():
logger.info('')
logger.info('')
logger.info('')
logger.info('-' * 25)
logger.info('ABFINDER')
logger.info('-' * 25)
def print_standards_info(standards):
logger.info('')
logger.info('Found {} standard sequence(s):'.format(len(standards)))
logger.info(', '.join([s.id for s in standards]))
def print_collections_info(collections):
logger.info('')
logger.info('Found {} collection(s):'.format(len(collections)))
logger.info(', '.join(collections))
def print_single_standard(standard):
standard_id_string = '{}'.format(standard.id)
logger.info('')
logger.info(standard_id_string)
logger.info('-' * len(standard_id_string))
def print_single_collection(collection):
logger.info('')
logger.info('')
logger.info(collection)
logger.info('-' * len(collection))
def print_query_info():
logger.info('Querying for comparison sequences...')
def print_remove_padding():
logger.info('')
logger.info('Removing MongoDB padding...')
def print_fig_info():
logger.info('Making identity/divergence figure...')
def print_index_info():
logger.info('Indexing the MongoDB collection...')
def print_update_info():
logger.info('Updating the MongoDB database with identity scores:')
# ================================================
#
# IDENTITY JOBS
#
# ================================================
def run_jobs(files, standard, args):
logger.info('Running AbCompare...')
if args.cluster:
return _run_jobs_via_celery(files, standard, args)
else:
return _run_jobs_via_multiprocessing(files, standard, args)
def _run_jobs_via_multiprocessing(files, standard, args):
from abtools.queue.tasks import identity
results = []
if args.debug:
for f in files:
results.extend(identity(f, standard, args.is_aa, args.debug))
else:
p = mp.Pool()
async_results = []
for f in files:
async_results.append(p.apply_async(identity, (f, standard, args.is_aa)))
monitor_mp_jobs(async_results)
for a in async_results:
results.extend(a.get())
p.close()
p.join()
ids = [r[0] for r in results]
identities = pd.Series([r[1] for r in results], index=ids)
divergences = pd.Series([100. - r[2] for r in results], index=ids)
d = {'identity': identities, 'germ_divergence': divergences}
df = pd.DataFrame(d)
return df
def monitor_mp_jobs(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
ready = [ar for ar in results if ar.ready()]
finished = len(ready)
update_progress(finished, jobs)
print('')
def _run_jobs_via_celery(files, standard, args):
from abtools.queue.tasks import identity
async_results = []
for f in files:
async_results.append(identity.delay(f, standard, args.is_aa))
succeeded, failed = monitor_celery_jobs(async_results)
scores = []
for s in succeeded:
scores.extend(s.get())
ids = [r[0] for r in scores]
identities = pd.Series([r[1] for r in scores], index=ids)
divergences = pd.Series([r[2] for r in scores], index=ids)
d = {'identity': identities, 'germ_divergence': divergences}
df = pd.DataFrame(d)
return df
def monitor_celery_jobs(results):
finished = 0
jobs = len(results)
while finished < jobs:
time.sleep(1)
succeeded = [ar for ar in results if ar.successful()]
failed = [ar for ar in results if ar.failed()]
finished = len(succeeded) + len(failed)
update_progress(finished, jobs, failed=len(failed))
print('')
return succeeded, failed
def update_progress(finished, jobs, failed=0):
    pct = int(100. * finished / jobs)
    ticks = pct / 2
    spaces = 50 - ticks
    prog_bar = '\r({}/{}) |{}{}| {}%'.format(finished, jobs, '|' * ticks, ' ' * spaces, pct)
    if failed:
        prog_bar += ' ({} failed)'.format(failed)
    sys.stdout.write(prog_bar)
    sys.stdout.flush()
def run(**kwargs):
'''
Mines NGS datasets for identity to known antibody sequences.
All of ``db``, ``output``, ``temp`` and ``standard`` are required.
Args:
db (str): Name of a MongoDB database to query.
collection (str): Name of a MongoDB collection. If not provided, all collections
in ``db`` will be processed iteratively.
        output (str): Path to the output directory, into which identity/divergence
figures will be deposited.
        temp (str): Path to a temporary directory.
log (str): Path to a log file. If not provided, log information will not be retained.
ip (str): IP address of the MongoDB server. Default is ``localhost``.
port (str): Port of the MongoDB server. Default is ``27017``.
user (str): Username with which to connect to the MongoDB database. If either
of ``user`` or ``password`` is not provided, the connection to the MongoDB
database will be attempted without authentication.
password (str): Password with which to connect to the MongoDB database. If either
of ``user`` or ``password`` is not provided, the connection to the MongoDB
database will be attempted without authentication.
standard (path): Path to a FASTA-formatted file containing one or more 'standard'
sequences, against which the NGS sequences will be compared.
chain (str): Antibody chain. Choices are 'heavy', 'kappa', 'lambda', and 'light'.
Default is 'heavy'. Only NGS sequences matching ``chain`` (with 'light' covering
both 'kappa' and 'lambda') will be compared to the ``standard`` sequences.
update (bool): If ``True``, the MongoDB record for each NGS sequence will be updated
            with identity information for each standard. If ``False``, the update is skipped.
Default is ``True``.
        is_aa (bool): If ``True``, the ``standard`` sequences are amino acid sequences. If
            ``False``, they are nucleotide sequences. Default is ``True``.
x_min (int): Minimum x-axis value on identity/divergence plots.
x_max (int): Maximum x-axis value on identity/divergence plots.
y_min (int): Minimum y-axis value on identity/divergence plots.
y_max (int): Maximum y-axis value on identity/divergence plots.
gridsize (int): Relative size of hexbin grids.
mincount (int): Minimum number of sequences in a hexbin for the bin to be colored.
Default is 3.
colormap (str, colormap): Colormap to be used for identity/divergence plots.
Default is ``Blues``.
debug (bool): If ``True``, more verbose logging.
'''
args = Args(**kwargs)
global logger
logger = log.get_logger('abfinder')
main(args)
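# Example of calling run() from Python (hypothetical values; a minimal sketch of the keyword API):
#   from abtools import _finder
#   _finder.run(db='my_ngs_db', collection='donor1_heavy', output='/data/abfinder/figures',
#               temp='/data/abfinder/tmp', standard='/data/mabs/standards.fasta', chain='heavy')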
def run_standalone(args):
logfile = args.log if args.log else os.path.join(args.output_dir, 'abfinder.log')
log.setup_logging(logfile)
global logger
logger = log.get_logger('abfinder')
main(args)
def main(args):
print_abfinder_start()
db = mongodb.get_db(args.db, args.ip, args.port,
args.user, args.password)
make_directories(args)
standards = get_standards(args)
print_standards_info(standards)
collections = mongodb.get_collections(db, args.collection, prefix=args.collection_prefix)
print_collections_info(collections)
for collection in collections:
indexed = False
print_single_collection(collection)
if args.remove_padding:
print_remove_padding()
mongodb.remove_padding(db, collection)
seq_files = get_sequences(db, collection, args.temp_dir, args)
for standard in standards:
print_single_standard(standard)
scores = run_jobs(seq_files, standard, args)
if args.output_dir:
make_figure(standard.id, scores, collection, args)
if args.update:
if not indexed:
mongodb.index(db, collection, 'seq_id')
indexed = True
update_db(db, standard.id, scores, collection, args)
clean_up(seq_files)
if __name__ == '__main__':
parser = parse_args()
args = parser.parse_args()
logfile = args.log if args.log else os.path.join(args.output_dir, 'abfinder.log')
log.setup_logging(logfile)
logger = log.get_logger('abfinder')
main(args)
| mit |
blab/antibody-response-pulse | bcell-array/code/VBMG_vaccination_1st-Copy1.py | 2 | 14679 |
# coding: utf-8
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse/
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for sequential vaccination
# In[1]:
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.ticker import FuncFormatter
AlvaFontSize = 23
AlvaFigSize = (9, 6)
numberingFig = 0
# plotting
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/bcell-array/figure'
file_name = 'Vaccine-Bcell-IgM-IgG'
#figure_name = '-equation'
#file_suffix = '.png'
#save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 5))
plt.axis('off')
plt.title(r'$ Vaccine-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ sequential-vaccination) $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = +\xi_{v}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} M_{n}(t) V_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = +\xi_{b}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) + (\beta_{m} + \beta_{g}) V_{n}(t) B_{n}(t) - \mu_{b} B_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = +\xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = +\xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) - \mu_{g} G_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
#plt.savefig(save_figure, dpi = 100)
plt.show()
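# Added note on the state variables used below: V = vaccine/virus load, B = B cells,
# M = IgM antibody, G = IgG antibody; the subscript n indexes the vaccination event
# (only the first event is initialised and plotted in this script).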
# define the V-B-M-G partial differential equations
def dVdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dV_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dV_dt_array[xn] = +inRateV*V[xn]*(1 - V[xn]/maxV) - killRateVm*M[xn]*V[xn] - killRateVg*G[xn]*V[xn]
return(dV_dt_array)
def dBdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dB_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dB_dt_array[xn] = +inRateB*V[xn]*(1 - V[xn]/maxV) + (actRateBm + actRateBg)*B[xn]*V[xn] - outRateB*B[xn]
return(dB_dt_array)
def dMdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dM_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dM_dt_array[xn] = +inRateM*B[xn] - consumeRateM*M[xn]*V[xn] - outRateM*M[xn]
return(dM_dt_array)
def dGdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dG_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
for xn in range(x_totalPoint):
dG_dt_array[xn] = +inRateG*B[xn] - consumeRateG*G[xn]*V[xn] - outRateG*G[xn]
return(dG_dt_array)
# define RK4 for an array (n_eq, n) of coupled differential equations (here n_eq = 4: V, B, M, G)
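# (Added note) This is the classic 4th-order Runge-Kutta scheme: four slope evaluations k1..k4
# per time step, combined as y_{n+1} = y_n + dt*(k1/6 + k2/3 + k3/3 + k4/6), which matches the
# weighted sum applied at the "solid step" below.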
def AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX_In, maxX_In, totalGPoint_X, minT_In, maxT_In, totalGPoint_T):
# primary size of pde equations
outWay = pde_array.shape[0]
# initialize the whole memory-space for output and input
inWay = 1; # one layer is enough for storing "x" and "t" (only two list of variable)
# define the first part of array as output memory-space
gridOutIn_array = np.zeros([outWay + inWay, totalGPoint_X, totalGPoint_T])
# loading starting output values
for i in range(outWay):
gridOutIn_array[i, :, :] = startingOut_Value[i, :, :]
# griding input X value
gridingInput_X = np.linspace(minX_In, maxX_In, num = totalGPoint_X, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, :, 0] = gridingInput_X[0]
# step-size (increment of input X)
dx = gridingInput_X[1]
# griding input T value
gridingInput_T = np.linspace(minT_In, maxT_In, num = totalGPoint_T, retstep = True)
# loading input values to (define the final array as input memory-space)
gridOutIn_array[-inWay, 0, :] = gridingInput_T[0]
# step-size (increment of input T)
dt = gridingInput_T[1]
# starting
# initialize the memory-space for local try-step
dydt1_array = np.zeros([outWay, totalGPoint_X])
dydt2_array = np.zeros([outWay, totalGPoint_X])
dydt3_array = np.zeros([outWay, totalGPoint_X])
dydt4_array = np.zeros([outWay, totalGPoint_X])
# initialize the memory-space for keeping current value
currentOut_Value = np.zeros([outWay, totalGPoint_X])
for tn in range(totalGPoint_T - 1):
# cut off --- setting virus = 0 if virus < 1
if gridOutIn_array[0, 0, tn] < 1.0:
gridOutIn_array[0, 0, tn] = 0.0
# bottom line --- setting bcell = 1 if bcell < 1
if gridOutIn_array[1, 0, tn] < 1.0:
gridOutIn_array[1, 0, tn] = 1.0
# keep initial value at the moment of tn
currentOut_Value[:, :] = np.copy(gridOutIn_array[:-inWay, :, tn])
currentIn_T_Value = np.copy(gridOutIn_array[-inWay, 0, tn])
# first try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt1_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt1_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
# second half try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt2_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt2_array[:, :]*dt/2 # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt/2 # update input
# third half try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt3_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
gridOutIn_array[:-inWay, :, tn] = currentOut_Value[:, :] + dydt3_array[:, :]*dt # update output
gridOutIn_array[-inWay, 0, tn] = currentIn_T_Value + dt # update input
# fourth try-step
for i in range(outWay):
for xn in range(totalGPoint_X):
dydt4_array[i, xn] = pde_array[i](gridOutIn_array[:, :, tn])[xn] # computing ratio
# solid step (update the next output) by accumulate all the try-steps with proper adjustment
gridOutIn_array[:-inWay, :, tn + 1] = currentOut_Value[:, :] + dt*(dydt1_array[:, :]/6
+ dydt2_array[:, :]/3
+ dydt3_array[:, :]/3
+ dydt4_array[:, :]/6)
# restore to initial value
gridOutIn_array[:-inWay, :, tn] = np.copy(currentOut_Value[:, :])
gridOutIn_array[-inWay, 0, tn] = np.copy(currentIn_T_Value)
# end of loop
return (gridOutIn_array[:-inWay, :])
# -----------------------------------------
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1); day = float(24);
elif timeUnit == 'day':
day = float(1); hour = float(1)/24;
# Experimental lab data from (Quantifying the Early Immune Response and Adaptive Immune) paper
gT_lab = np.array([0, 7, 14, 28])*day
gFM1_lab = np.array([2**(5 + 1.0/3), 2**7, 2**(8 + 1.0/6), 2**(8 - 1.0/2)])
error_FM1 = gFM1_lab**(4.0/5)
bar_width = 2
###
maxV = float(16) # max vaccine/micro-liter
inRateV = 0.2/hour # in-rate of virus
killRateVm = 0.001/hour # kill-rate of virus by antibody-IgM
killRateVg = killRateVm # kill-rate of virus by antibody-IgG
inRateB = 0.06/hour # in-rate of B-cell
outRateB = inRateB/8 # out-rate of B-cell
actRateBm = killRateVm # activation rate of naive B-cell
actRateBg = killRateVg # activation rate of memory B-cell
inRateM = 0.16/hour # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1 # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm # consume-rate of antibody-IgM by cleaning virus
inRateG = inRateM/6 # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/60 # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg # consume-rate of antibody-IgG by cleaning virus
# time boundary and griding condition
minT = float(0)
maxT = float(80*day)
totalGPoint_T = int(2*10**3 + 1)
gridT = np.linspace(minT, maxT, totalGPoint_T)
spacingT = np.linspace(minT, maxT, num = totalGPoint_T, retstep = True)
gridT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(1)
totalGPoint_X = int(1 + 1)
gridX = np.linspace(minX, maxX, totalGPoint_X)
gridingX = np.linspace(minX, maxX, num = totalGPoint_X, retstep = True)
gridX = gridingX[0]
dx = gridingX[1]
gridV_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridB_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridM_array = np.zeros([totalGPoint_X, totalGPoint_T])
gridG_array = np.zeros([totalGPoint_X, totalGPoint_T])
# initial output condition
gridV_array[0, 0] = float(16)
gridB_array[0, 0] = float(0)
gridM_array[0, 0] = float(0)
gridG_array[0, 0] = float(0)
# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
startingOut_Value = np.array([gridV_array, gridB_array, gridM_array, gridG_array])
gridOut_array = AlvaRungeKutta4ArrayXT(pde_array, startingOut_Value, minX, maxX, totalGPoint_X, minT, maxT, totalGPoint_T)
# plotting
gridV = gridOut_array[0]
gridB = gridOut_array[1]
gridM = gridOut_array[2]
gridG = gridOut_array[3]
figure_name = '-first-vaccination'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
ymin = -40
ymax = 400
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridB[i], color = 'purple', label = r'$ B_{%i}(t) $'%(i), linewidth = 5.0, alpha = 0.5
, linestyle = '-.')
plt.plot(gridT, gridM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridM[i] + gridG[i], color = 'black', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.bar(gT_lab - bar_width/2, gFM1_lab, bar_width, alpha = 0.3, color = 'black', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ (FM1-vaccine) $')
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ for \ First-Vaccination $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Serum \ antibody \ (pg/ml) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.7)
plt.yticks(fontsize = AlvaFontSize*0.7)
plt.text(maxT*16.0/10, ymax*8.0/10, r'$ V_{max} = %f $'%(maxV), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*7.0/10, r'$ \mu_{v} = %f $'%(inRateV), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*6.0/10, r'$ \phi_{m} = %f $'%(killRateVm), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*5.0/10, r'$ \phi_{g} = %f $'%(killRateVg), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*4.0/10, r'$ \mu_{b} = %f $'%(inRateB), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*3.0/10, r'$ \xi_{m} = %f $'%(inRateM), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*2.0/10, r'$ \xi_{g} = %f $'%(inRateG), fontsize = AlvaFontSize)
plt.text(maxT*16.0/10, ymax*1.0/10, r'$ \mu_{g} = %f $'%(outRateG), fontsize = AlvaFontSize)
plt.ylim(ymin, ymax)
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[2]:
numberingFig = numberingFig + 1
ymin = 2**0
ymax = 2**9
for i in range(1):
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gridT, gridV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridB[i], color = 'purple', label = r'$ B_{%i}(t) $'%(i), linewidth = 5.0, alpha = 0.5
, linestyle = '-.')
plt.plot(gridT, gridM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gridT, gridM[i] + gridG[i], color = 'black', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.bar(gT_lab - bar_width/2, gFM1_lab, bar_width, alpha = 0.3, color = 'black', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ (FM1-vaccine) $')
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ for \ First-Vaccination $', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Serum \ antibody \ (pg/ml) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.7)
plt.yticks(fontsize = AlvaFontSize*0.7)
plt.ylim(ymin, ymax)
plt.yscale('log', basey = 2)
    # gca() -- get the current axis and format the tick labels as powers of 2
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.show()
# In[ ]:
| gpl-2.0 |
abhipr1/DATA_SCIENCE_INTENSIVE | Machine_Learning/Supervised_Learning/svm/svm_author_id.py | 1 | 1281 | #!/usr/bin/python
"""
this is the code to accompany the Lesson 2 (SVM) mini-project
use an SVM to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
#########################################################
### your code goes here ###
#########################################################
from sklearn.svm import SVC
features_train = features_train[:len(features_train)]
labels_train = labels_train[:len(labels_train)]
#clf = SVC(C=1, kernel='linear')
clf = SVC(C=10000, kernel='rbf')
t0 = time()
clf.fit(features_train, labels_train)
print "Training Time ",round(time()-t0, 3),"s"
t1 = time()
prediction = clf.predict(features_test)
print "Predicting Time ",round(time()-t1, 3 ),"s"
print "Accuracy ", clf.score(features_test,labels_test)
print(prediction[10])
print(prediction[26])
print(prediction[50])
print(list(prediction).count(1))
| apache-2.0 |
jackmaney/pg-utils | test/test4_dtypes.py | 1 | 1155 | import sys
sys.path = ['..'] + sys.path
import unittest
from pg_utils import table
import pandas as pd
table_name = "pg_utils_test_dtypes"
class TestDtypes(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.table = table.Table.create(table_name,
"""create table {} as
select x::int as x, random() as y, random() as z, 'abc'::text as w
from generate_series(1,100) x
distributed by (x,y);""".format(table_name))
@classmethod
def tearDownClass(cls):
cls.table.drop()
def test_dtypes(self):
dtypes = self.table.dtypes
self.assertTrue(isinstance(dtypes, pd.Series))
self.assertEqual(list(dtypes.index), list("xyzw"))
self.assertEqual(list(dtypes),
["integer", "double precision", "double precision", "text"])
dtype_counts = self.table.get_dtype_counts()
self.assertTrue(isinstance(dtype_counts, pd.Series))
self.assertEqual(list(dtype_counts.index),
["double precision", "integer", "text"])
self.assertEqual(list(dtype_counts), [2, 1, 1])
| mit |
yanlend/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
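# The manual standardization above scales only the first 10 (numerical)
# columns, because mean/std of the remaining one-hot columns are forced to
# 0/1. A hedged, equivalent sketch with StandardScaler (illustration only,
# not used by this benchmark):
def _scale_numeric_sketch(X_train, X_test, n_numeric=10):
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler().fit(X_train[:, :n_numeric])
    X_train, X_test = X_train.copy(), X_test.copy()
    X_train[:, :n_numeric] = scaler.transform(X_train[:, :n_numeric])
    X_test[:, :n_numeric] = scaler.transform(X_test[:, :n_numeric])
    return X_train, X_test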
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
phihes/judgmentHMM | src/LR.py | 1 | 3445 | import pylab as pl
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
import sklearn.cross_validation as cv
from sklearn.metrics import *
import sys
data = pd.read_csv("data/combined.CAOL.delta.pca.scaled.ssa-dummies.csv")
# shuffle data a bit
#data = data.reindex(np.random.permutation(data.index))
pc = int(sys.argv[1])
ssa = sys.argv[2]
if(ssa == "1"):
ssa = True
else:
ssa = False
print("linear regression with " + str(pc) + " PCs and SSA=" + str(ssa))
features = list([False,False,False,False,False,False,False,False,False])
features[2] = ["pc-2-1","pc-2-2"]
features[3] = ["pc-3-1","pc-3-2","pc-3-3"]
features[4] = ["pc-4-1","pc-4-2","pc-4-3","pc-4-4"]
features[5] = ["pc-5-1","pc-5-2","pc-5-3","pc-5-4","pc-5-5"]
features[6] = ["pc-6-1","pc-6-2","pc-6-3","pc-6-4","pc-6-5","pc-6-6"]
features[7] = ["pc-7-1","pc-7-2","pc-7-3","pc-7-4","pc-7-5","pc-7-6","pc-7-7"]
features[8] = ["pc-8-1","pc-8-2","pc-8-3","pc-8-4","pc-8-5","pc-8-6","pc-8-7","pc-8-8"]
features = features[pc]
if(ssa):
features.extend(["SSA-WELCOME","SSA-ASKCONFIRM","SSA-ASKFORINFO","SSA-SORRY","SSA-INFO","SSA-DETAILS","SSA-NAV"])
feat_data = [data[f].values for f in features]
rate_data = data['rating'].values
# generate CV mask
masks = cv.LeaveOneLabelOut(data['label'].values)
# store results here
true = list()
pred = list()
pred_q = list()
for train,test in masks:
X_train = np.array(zip(*[data[f].loc[train].values for f in features]))
X_test = np.array(zip(*[data[f].loc[test].values for f in features]))
y_train = np.array(data['rating'].loc[train].values)
y_test = np.array(data['rating'].loc[test].values)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, y_train)
true.extend(y_test)
pred.extend(regr.predict(X_test))
#print(y_test)
#print(regr.predict(X_test))
# "quantize" prediction
# MSE
for i in range(0,len(true)):
q = round(pred[i])
if(q==0):
q = 1
if(q==6):
q = 5
pred_q.append(q)
# MAE
dist = 0.0
for i in range(0,len(true)):
dist += abs(float(true[i])-float(pred_q[i]))
print("MAE: " + str(dist/(float(len(true)))))
# MAE-LR
dist = 0.0
for i in range(0,len(true)):
dist += abs(float(true[i])-float(pred[i]))
print("MAE-LR: " + str(dist/(float(len(true)))))
# MSE
dist = 0.0
for i in range(0,len(true)):
dist += (float(true[i])-float(pred[i]))**2
#print("MSE: " + str(dist/(float(len(true)))))
# R2
#print"R2: " + str((r2_score(true,pred)))
# MAE-class
for c in xrange(1,6):
dist = 0.0
count = 0
for i in range(0,len(true)):
if(true[i]==c):
count += 1
dist += (float(true[i])-float(pred[i]))**2
#print("MAE-" + str(c) + ": " + str(dist/(float(count))))
#print(precision_recall_fscore_support(true,pred_q))
print(f1_score(true,pred_q, average=None))
print("acc: " + str(accuracy_score(true,pred_q)))
# The coefficients
#print('Coefficients: \n', regr.coef_)
# The mean square error
#print("Residual sum of squares: %.2f"
# % np.mean((regr.predict(X_test) - y_test) ** 2))
# Explained variance score: 1 is perfect prediction
#print('Variance score: %.2f' % regr.score(X_test, y_test))
# Plot outputs
#pl.scatter(X_test, diabetes_y_test, color='black')
#pl.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
#linewidth=3)
#pl.xticks(())
#pl.yticks(())
#pl.show()
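# Hedged cross-check (illustration only, not part of the original script): the
# same MAE / accuracy numbers can be obtained directly from sklearn.metrics
# instead of the manual loops above.
def _metrics_cross_check_sketch(true, pred, pred_q):
    from sklearn.metrics import mean_absolute_error, accuracy_score
    print("MAE (sklearn): " + str(mean_absolute_error(true, pred_q)))
    print("MAE-LR (sklearn): " + str(mean_absolute_error(true, pred)))
    print("acc (sklearn): " + str(accuracy_score(true, pred_q)))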
| mit |
yl565/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
    separately and take the ratio; check that the theory is correct and the example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
This class contains several methods that are providing the same or similar
returns to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
    returns for spectral density methods need checking: is it always the power
    spectrum hw*hw.conj()?
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
extended AR polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
if n == self.fftarma:
fftarma = self.fftarma
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
        this is also similarly used in tsa.stattools.periodogram with window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
rvs = self.generate_sample(nsample=100, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
        ax.set_title('Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
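# Hedged usage sketch for ArmaFft (not part of the original module): build a
# small ARMA(1,1) process, compare two of the spectral-density estimates and
# recover the first autocovariances from the power spectrum.
def _armafft_demo_sketch(nobs=256):
    process = ArmaFft([1, -0.5], [1.0, 0.4], nobs)
    spd_direct, w_direct = process.spddirect(2 * nobs)
    spd_roots, w_roots = process.spdroots(np.linspace(0, np.pi, nobs))
    # first few autocovariances, cf. the invpowerspd doctest above
    acovf_head = process.invpowerspd(2 ** 10)[:5]
    return (spd_direct, w_direct), (spd_roots, w_roots), acovf_head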
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print(marep[:10])
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0))
arrep = signal.lfilter([1],marep, uni)
print(arrep[:20]) # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print(x[:5])
print(rvs[:5])
print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0))
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print(y2[:10])
print(y[:10])
print(maxabs(y, y2)) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print(spd1.shape)
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print(spd2.shape)
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print(spd3.shape)
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print(spd3b.shape)
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print(spdr.shape)
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print(spdar1_.shape)
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print(spdper.shape)
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
sdm, wm = mlb.psd(x)
print('sdm.shape', sdm.shape)
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print('spdnt.shape', spdnt.shape)
_ = plt.plot(spdnt.ravel())
print(spdnt[:10])
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/linear_model/bayes.py | 50 | 16145 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_offset, y_offset, X_scale)
return self
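# Hedged sketch (not part of scikit-learn): the posterior mean computed inside
# BayesianRidge.fit is a ridge solve with penalty ratio lambda_/alpha_,
#
#     coef = (X^T X + (lambda_/alpha_) * I)^{-1} X^T y
#
# written here directly, without the SVD shortcut used above.
def _bayesian_ridge_mean_sketch(X, y, alpha_, lambda_):
    n_features = X.shape[1]
    A = np.dot(X.T, X) + (lambda_ / alpha_) * np.eye(n_features)
    return np.linalg.solve(A, np.dot(X.T, y))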
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to be Gaussian distributed.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset, y_offset, X_scale)
return self
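# Hedged illustration (not part of scikit-learn): ARDRegression drives the
# per-feature precision lambda_ of irrelevant inputs above threshold_lambda,
# at which point the corresponding coefficients are pruned to exactly zero,
# so the fitted coef_ is typically sparse.
def _ard_sparsity_sketch(n_samples=100, n_relevant=3, n_noise=7, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_relevant + n_noise)
    w = np.zeros(n_relevant + n_noise)
    w[:n_relevant] = np.arange(1, n_relevant + 1)
    y = np.dot(X, w) + 0.01 * rng.randn(n_samples)
    clf = ARDRegression(compute_score=False).fit(X, y)
    # the trailing "noise" coefficients should mostly come out as exactly 0
    return clf.coef_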
| bsd-3-clause |
Captain-Coder/tribler | Tribler/Test/Integration/test_live_downloads.py | 1 | 9576 | import glob
import logging
import os
import shutil
import sys
import time
import unittest
from unittest import skipUnless
from urllib import pathname2url
import numpy
from PyQt5.QtCore import QPoint, Qt
from PyQt5.QtGui import QPixmap, QRegion
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import QApplication, QListWidget, QTreeWidget
import run_tribler
from check_os import setup_gui_logging
import matplotlib.pyplot as plot
import TriblerGUI
from TriblerGUI.tribler_window import TriblerWindow
from TriblerGUI.widgets.home_recommended_item import HomeRecommendedItem
from TriblerGUI.widgets.loading_list_item import LoadingListItem
default_download_dir = os.path.join(os.path.dirname(__file__), u"Downloads")
default_output_file = os.path.join(os.path.dirname(__file__), u"output.csv")
default_state_dir = os.path.join(os.path.dirname(__file__), u".Tribler")
default_timeout = 60000 # milliseconds
default_num_hops = 1
download_dir = os.environ.get("DOWNLOAD_DIR", default_download_dir)
output_file = os.environ.get("OUTPUT_FILE", default_output_file)
test_timeout = int(os.environ.get("TEST_TIMEOUT", default_timeout))
num_hops = int(os.environ.get("TEST_NUM_HOPS", default_num_hops))
state_dir = default_state_dir
if os.environ.get("TEST_INTEGRATION") == "yes":
# Get & set state directory
if os.environ.has_key('TSTATEDIR'):
state_dir = os.environ['TSTATEDIR']
else:
os.environ['TSTATEDIR'] = os.environ.get('TSTATEDIR', default_state_dir)
if state_dir and os.path.exists(state_dir):
shutil.rmtree(state_dir, ignore_errors=False, onerror=None)
if not os.path.exists(state_dir):
os.makedirs(state_dir)
# Set up logging before starting the GUI
setup_gui_logging()
core_script_file = os.path.abspath(run_tribler.__file__)
core_args = [core_script_file]
# QT App initialization
app = QApplication(sys.argv)
window = TriblerWindow(core_args=core_args)
# Wait till the window is shown
QTest.qWaitForWindowExposed(window)
else:
window = None
sys.excepthook = sys.__excepthook__
MAX_TIMEOUT = 60000
class TimeoutException(Exception):
pass
class AbstractTriblerIntegrationTest(unittest.TestCase):
"""
    This class contains various utility methods that are used during the GUI test, i.e. methods that wait until
    some data in a list is loaded, or that take a screenshot of the current window.
"""
def setUp(self):
self.signal_received = None
QTest.qWait(100)
self.screenshots_taken = 0
window.downloads_page.can_update_items = True
if not window.tribler_started:
self.screenshot(window, name="tribler_loading")
self.wait_for_signal(window.core_manager.events_manager.tribler_started, no_args=True, timeout=-1)
# Wait for tribler setting to be available
self.wait_for_settings(timeout=20)
# Set local download directory
window.tribler_settings['download_defaults']['number_hops'] = num_hops
window.tribler_settings['download_defaults']['saveas'] = download_dir
# Clear everything
if os.path.exists(download_dir):
shutil.rmtree(download_dir, ignore_errors=False, onerror=None)
os.makedirs(download_dir)
def tearDown(self):
window.downloads_page.can_update_items = False
if window:
window.close_tribler()
for _ in range(0, MAX_TIMEOUT, 100):
QTest.qWait(100)
if window.core_manager.check_stopped() is None:
return
def go_to_and_wait_for_downloads(self):
QTest.mouseClick(window.left_menu_button_downloads, Qt.LeftButton)
QTest.mouseClick(window.downloads_all_button, Qt.LeftButton)
self.wait_for_variable("downloads_page.downloads")
def screenshot(self, widget, name=None):
"""
Take a screenshot of the widget. You can optionally append a string to the name of the screenshot. The
screenshot itself is saved as a JPEG file.
"""
pixmap = QPixmap(widget.rect().size())
widget.render(pixmap, QPoint(), QRegion(widget.rect()))
self.screenshots_taken += 1
img_name = 'screenshot_%d.jpg' % self.screenshots_taken
if name is not None:
img_name = 'screenshot_%s.jpg' % name
screenshots_dir = os.path.join(os.path.dirname(TriblerGUI.__file__), 'screenshots')
if not os.path.exists(screenshots_dir):
os.mkdir(screenshots_dir)
pixmap.save(os.path.join(screenshots_dir, img_name))
def wait_for_list_populated(self, llist, num_items=1, timeout=10):
for _ in range(0, timeout * 1000, 100):
QTest.qWait(100)
if isinstance(llist, QListWidget) and llist.count() >= num_items:
if not isinstance(llist.itemWidget(llist.item(0)), LoadingListItem):
return
elif isinstance(llist, QTreeWidget) and llist.topLevelItemCount() > num_items:
if not isinstance(llist.topLevelItem(0), LoadingListItem):
return
# List was not populated in time, fail the test
raise TimeoutException("The list was not populated within 10 seconds")
def wait_for_home_page_table_populated(self, timeout=10):
for _ in range(0, timeout * 1000, 100):
QTest.qWait(100)
if isinstance(window.home_page_table_view.cellWidget(0, 0), HomeRecommendedItem):
return
# List was not populated in time, fail the test
raise TimeoutException("The list was not populated within 10 seconds")
def get_attr_recursive(self, attr_name):
parts = attr_name.split(".")
cur_attr = window
for part in parts:
cur_attr = getattr(cur_attr, part)
return cur_attr
def wait_for_variable(self, var, timeout=10, cmp_var=None):
for _ in range(0, timeout * 1000, 100):
QTest.qWait(100)
if self.get_attr_recursive(var) is not cmp_var:
return
raise TimeoutException("Variable %s within 10 seconds" % var)
def wait_for_settings(self, timeout=10):
for _ in range(0, timeout * 1000, 100):
QTest.qWait(100)
if window.tribler_settings is not None:
return
raise TimeoutException("Did not receive settings within 10 seconds")
def wait_for_signal(self, signal, timeout=10, no_args=False):
self.signal_received = False
def on_signal(_):
self.signal_received = True
if no_args:
signal.connect(lambda: on_signal(None))
else:
signal.connect(on_signal)
if timeout < 0:
timeout = MAX_TIMEOUT
for _ in range(0, timeout * 1000, 100):
QTest.qWait(100)
if self.signal_received:
logging.info("Signal %s received in %d seconds", signal, timeout)
return
raise TimeoutException("Signal %s not raised within %d seconds" % (signal, timeout))
@skipUnless(os.environ.get("TEST_INTEGRATION") == "yes", "Not integration testing by default")
class TriblerDownloadTest(AbstractTriblerIntegrationTest):
"""
GUI tests for the GUI written in PyQt. These methods are using the QTest framework to simulate mouse clicks.
"""
def test_live_downloads(self):
QTest.mouseClick(window.left_menu_button_home, Qt.LeftButton)
QTest.mouseClick(window.home_tab_torrents_button, Qt.LeftButton)
self.screenshot(window, name="home_page_torrents_loading")
# Start downloading some torrents
if os.environ.has_key('TORRENTS_DIR'):
torrent_dir = os.environ.get('TORRENTS_DIR')
else:
torrent_dir = os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), "data", "linux_torrents")
window.selected_torrent_files = [pathname2url(torrent_file)
for torrent_file in glob.glob(torrent_dir + "/*.torrent")]
window.on_confirm_add_directory_dialog(0)
self.go_to_and_wait_for_downloads()
QTest.qWait(2000)
with open(output_file, "w") as output:
output.write("time, upload, download\n")
def download_refreshed(_):
line = "%s, %s, %s\n" % (time.time(), window.downloads_page.total_upload/1000,
window.downloads_page.total_download/1000)
output.write(line)
window.downloads_page.received_downloads.connect(download_refreshed)
QTest.qWait(test_timeout)
# Stop downloads after timeout
window.downloads_page.received_downloads.disconnect()
window.downloads_page.stop_loading_downloads()
QTest.qWait(5000)
# Plot graph
data = numpy.genfromtxt(output_file, delimiter=',', skip_header=1,
skip_footer=0, names=['time', 'upload', 'download'])
figure = plot.figure()
subplot = figure.add_subplot(111)
subplot.set_title("Live downloads plot")
subplot.set_xlabel('Time (seconds)')
subplot.set_ylabel('Speed (kB/s)')
subplot.plot(data['time'], data['upload'], color='g', label='upload')
subplot.plot(data['time'], data['download'], color='r', label='download')
subplot.legend()
figure.savefig(output_file + '.png', bbox_inches='tight')
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
bokeh/bokeh | bokeh/util/hex.py | 1 | 8529 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions useful for dealing with hexagonal tilings.
For more information on the concepts employed here, see this informative page
https://www.redblobgames.com/grids/hexagons/
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Tuple
# External imports
import numpy as np
# Bokeh imports
from .dependencies import import_required
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'axial_to_cartesian',
'cartesian_to_axial',
'hexbin',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def axial_to_cartesian(q: Any, r: Any, size: float, orientation: str, aspect_scale: float = 1) -> Tuple[Any, Any]:
''' Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
tiles centers.
This function can be useful for positioning other Bokeh glyphs with
cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
if orientation == "pointytop":
x = size * np.sqrt(3) * (q + r/2.0) / aspect_scale
y = -size * 3/2.0 * r
else:
x = size * 3/2.0 * q
y = -size * np.sqrt(3) * (r + q/2.0) * aspect_scale
return (x, y)
def cartesian_to_axial(x: Any, y: Any, size: float, orientation: str, aspect_scale: float = 1) -> Tuple[Any, Any]:
''' Map Cartesion *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
HEX_FLAT = [2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
HEX_POINTY = [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0]
coords = HEX_FLAT if orientation == 'flattop' else HEX_POINTY
x = x / size * (aspect_scale if orientation == "pointytop" else 1)
y = -y / size / (aspect_scale if orientation == "flattop" else 1)
q = coords[0] * x + coords[1] * y
r = coords[2] * x + coords[3] * y
return _round_hex(q, r)
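# Hedged round-trip check (illustration only, not part of the public bokeh
# API): mapping integer axial coordinates to tile-center cartesian coordinates
# and back should recover the same (q, r) pair.
def _axial_roundtrip_check_sketch(size=1.0, orientation="pointytop"):
    q = np.array([0, 1, -2, 3])
    r = np.array([0, -1, 2, 1])
    x, y = axial_to_cartesian(q, r, size, orientation)
    q2, r2 = cartesian_to_axial(x, y, size, orientation)
    assert (q2 == q).all() and (r2 == r).all()
    return q2, r2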
def hexbin(x: Any, y: Any, size: float, orientation: str = "pointytop", aspect_scale: float = 1) -> Any:
''' Perform an equal-weight binning of data points into hexagonal tiles.
For more sophisticated use cases, e.g. weighted binning or scaling
individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots.
'''
pd: Any = import_required('pandas','hexbin requires pandas to be installed')
q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
df = pd.DataFrame(dict(r=r, q=q))
return df.groupby(['q', 'r']).size().reset_index(name='counts')
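# Hedged usage sketch (illustration only, requires pandas just like hexbin
# itself): bin a cloud of normally distributed points; the resulting DataFrame
# has columns q, r and counts, ready to drive a hex_tile glyph.
def _hexbin_demo_sketch(n=500, size=0.5):
    x = np.random.standard_normal(n)
    y = np.random.standard_normal(n)
    return hexbin(x, y, size)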
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _round_hex(q: Any, r: Any) -> Tuple[Any, Any]:
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
r (array[float]) :
NumPy array of Floating point axial *q* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x) # type: ignore[no-untyped-call]
ry = np.round(y) # type: ignore[no-untyped-call]
rz = np.round(z) # type: ignore[no-untyped-call]
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int)
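# Worked example for the rounding rule above (hedged illustration, not part of
# the module's API): for the fractional axial point (q, r) = (1.3, 0.4) the
# r/z component has the largest rounding error, so r is recomputed from the
# other two rounded cube coordinates, giving the tile (1, 1).
def _round_hex_example_sketch():
    q, r = _round_hex(np.asarray([1.3]), np.asarray([0.4]))
    assert (q[0], r[0]) == (1, 1)
    return q, r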
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
JaggedG/ThinkStats2 | code/regression.py | 62 | 9652 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
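# Hedged sanity check (not part of the original module): the intercept and
# slope returned by QuickLeastSquares should agree with numpy's polyfit to
# numerical precision.
def _quick_least_squares_check_sketch():
    xs = np.linspace(0, 10, 50)
    ys = 3.0 * xs + 1.0 + np.random.normal(0, 0.1, len(xs))
    inter, slope, mse = QuickLeastSquares(xs, ys)
    slope_np, inter_np = np.polyfit(xs, ys, 1)
    assert abs(slope - slope_np) < 1e-6 and abs(inter - inter_np) < 1e-6
    return inter, slope, mse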
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
    all_vars = pandas.concat([vars1, vars2])
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
            formula = 'totalwgt_lb ~ agepreg + ' + name
            # patsy needs a native str; the encode call was a Python 2
            # unicode workaround, so skip it when formula is already str
            if not isinstance(formula, str):
                formula = formula.encode('ascii')
            model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
    variables: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
    for r2, name in variables[:n]:
        key = re.sub('_r$', '', name)
        try:
            desc = all_vars.loc[key].desc
            if isinstance(desc, pandas.Series):
                desc = desc[0]
            print(name, r2, desc)
        except KeyError:
            print(name, r2)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
    for name, param in results.params.items():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
    table = pandas.pivot_table(live, index='isfirst',
                               values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
    df = live[live.prglngth>30].copy()
    df['boy'] = (df.babysex==1).astype(int)
    df['isyoung'] = (df.agepreg<20).astype(int)
    df['isold'] = (df.agepreg>35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
anaandresarroyo/Python-GarminDataAnalyser | database/plot.py | 1 | 1348 | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def generate_colours(df, column, cmap_name):
    # TODO: the colours are generated slightly differently from the ones pandas assigns automatically
labels = np.sort(df[column].unique())
cmap = plt.get_cmap(cmap_name)
colours = cmap(np.linspace(0,1,len(labels)+1))
colour_dict = dict(zip(labels,colours))
return colour_dict
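# Editorial usage sketch (not part of the original module): build a colour
# lookup for the unique values of a column and read one entry back. The
# 'sport' column and 'viridis' cmap are illustrative assumptions.
def _example_generate_colours():
    df = pd.DataFrame({'sport': ['running', 'cycling', 'running', 'walking']})
    colour_dict = generate_colours(df, 'sport', 'viridis')
    return colour_dict['running']  # an RGBA array from the viridis colormap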
def populate_plot_options(kind, alpha, cmap_name, df=pd.DataFrame(),
index=False, legend=False, stacked=True):
plot_options = dict()
plot_options['kind'] = kind
plot_options['alpha'] = alpha
if not df.empty:
colour_dict = generate_colours(df, legend, cmap_name)
label = df.loc[index,legend]
plot_options['c'] = colour_dict[label]
plot_options['label'] = str(label)
else:
plot_options['colormap'] = cmap_name
if kind == 'line':
plot_options['linewidth'] = 2
# plot_options['marker'] = '.'
# plot_options['markersize'] = 12
# TODO: move default marker size to MatplotlibSettings.py
elif kind == 'scatter':
plot_options['edgecolors'] = 'face'
plot_options['s'] = 12
elif 'bar' in kind:
plot_options['stacked'] = stacked
plot_options['edgecolor'] = 'none'
    return plot_options
| mit |
aeklant/scipy | scipy/signal/windows/windows.py | 5 | 74101 | """The suite of window functions."""
import operator
import warnings
import numpy as np
from scipy import linalg, special, fft as sp_fft
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
           'hamming', 'kaiser', 'gaussian', 'general_cosine', 'general_gaussian',
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
'exponential', 'tukey', 'dpss', 'get_window']
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
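# Editorial sketch (not part of SciPy's API): every window generator below
# follows the same pattern with the three helpers above -- guard degenerate
# lengths, extend by one sample when a periodic ("DFT-even") window is
# requested, compute the taper, then drop the extra sample again.
def _example_raised_cosine(M, sym=True):
    if _len_guards(M):
        return np.ones(M)
    M, needs_trunc = _extend(M, sym)
    w = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(M) / (M - 1))
    return _truncate(w, needs_trunc)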
def general_cosine(M, a, sym=True):
r"""
Generic weighted sum of cosine terms window
Parameters
----------
M : int
Number of points in the output window
a : array_like
Sequence of weighting coefficients. This uses the convention of being
centered on the origin, so these will typically all be positive
numbers, not alternating sign.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Heinzel describes a flat-top window named "HFT90D" with formula: [2]_
.. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z)
- 0.440811 \cos(3z) + 0.043097 \cos(4z)
where
.. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1
Since this uses the convention of starting at the origin, to reproduce the
window, we need to convert every other coefficient to a positive number:
>>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]
The paper states that the highest sidelobe is at -90.2 dB. Reproduce
Figure 42 by plotting the window and its frequency response, and confirm
the sidelobe level in red:
>>> from scipy.signal.windows import general_cosine
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = general_cosine(1000, HFT90D, sym=False)
>>> plt.plot(window)
>>> plt.title("HFT90D window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 10000) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-50/1000, 50/1000, -140, 0])
>>> plt.title("Frequency response of the HFT90D window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.axhline(-90.2, color='red')
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.linspace(-np.pi, np.pi, M)
w = np.zeros(M)
for k in range(len(a)):
w += a[k] * np.cos(k * fac)
return _truncate(w, needs_trunc)
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Also known as a rectangular window or Dirichlet window, this is equivalent
to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.ones(M, float)
return _truncate(w, needs_trunc)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
See Also
--------
bartlett : A triangular window that touches zero
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
return _truncate(w, needs_trunc)
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] E. Parzen, "Mathematical Considerations in the Estimation of
Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 167-190
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
return _truncate(w, needs_trunc)
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
return _truncate(w, needs_trunc)
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
The "exact Blackman" window was designed to null out the third and fourth
sidelobes, but has discontinuities at the boundaries, resulting in a
6 dB/oct fall-off. This window is an approximation of the "exact" window,
which does not null the sidelobes as well, but is smooth at the edges,
improving the fall-off rate to 18 dB/oct. [3]_
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
.. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
return general_cosine(M, [0.42, 0.50, 0.08], sym)
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
This variation is called "Nuttall4c" by Heinzel. [2]_
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym)
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
Flat top windows are used for taking accurate measurements of signal
amplitude in the frequency domain, with minimal scalloping error from the
center of a frequency bin to its edges, compared to others. This is a
5th-order cosine window, with the 5 terms optimized to make the main lobe
maximally flat. [1]_
References
----------
.. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for
Measurement Systems", Springer Media, 2006, p. 70
:doi:`10.1007/0-387-28666-7`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368]
return general_cosine(M, a, sym)
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
See Also
--------
triang : A triangular window that does not touch zero at the ends
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich. [2]_
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
return _truncate(w, needs_trunc)
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
return general_hamming(M, 0.5, sym)
@np.deprecate(new_name='scipy.signal.windows.hann')
def hanning(*args, **kwargs):
return hann(*args, **kwargs)
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
return _truncate(w, needs_trunc)
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
return _truncate(w, needs_trunc)
def general_hamming(M, alpha, sym=True):
r"""Return a generalized Hamming window.
The generalized Hamming window is constructed by multiplying a rectangular
window by one period of a cosine function [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float
The window coefficient, :math:`\alpha`
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Hamming window is defined as
.. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
Both the common Hamming window and Hann window are special cases of the
generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` =
0.5, respectively [2]_.
See Also
--------
hamming, hann
Examples
--------
The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming
windows in the processing of spaceborne Synthetic Aperture Radar (SAR)
data [3]_. The facility uses various values for the :math:`\alpha`
parameter based on operating mode of the SAR instrument. Some common
:math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we
plot these different windows.
>>> from scipy.signal.windows import general_hamming
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> fig1, spatial_plot = plt.subplots()
>>> spatial_plot.set_title("Generalized Hamming Windows")
>>> spatial_plot.set_ylabel("Amplitude")
>>> spatial_plot.set_xlabel("Sample")
>>> fig2, freq_plot = plt.subplots()
>>> freq_plot.set_title("Frequency Responses")
>>> freq_plot.set_ylabel("Normalized magnitude [dB]")
>>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]")
>>> for alpha in [0.75, 0.7, 0.52]:
... window = general_hamming(41, alpha)
... spatial_plot.plot(window, label="{:.2f}".format(alpha))
... A = fft(window, 2048) / (len(window)/2.0)
... freq = np.linspace(-0.5, 0.5, len(A))
... response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
... freq_plot.plot(freq, response, label="{:.2f}".format(alpha))
>>> freq_plot.legend(loc="upper right")
>>> spatial_plot.legend(loc="upper right")
References
----------
.. [1] DSPRelated, "Generalized Hamming Window Family",
https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm
Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition
.. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition
"""
return general_cosine(M, [alpha, 1. - alpha], sym)
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
return general_hamming(M, 0.54, sym)
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate other windows by varying the beta parameter.
(Some literature uses alpha = beta/pi.) [4]_
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transform," Proceedings of the IEEE, vol. 66,
no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
return _truncate(w, needs_trunc)
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
return _truncate(w, needs_trunc)
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian "
... r"window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
return _truncate(w, needs_trunc)
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(sp_fft.fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(sp_fft.fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
return _truncate(w, needs_trunc)
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
.. note:: Deprecated in SciPy 1.1.
`slepian` will be removed in a future version of SciPy, it is
replaced by `dpss`, which uses the standard definition of a
digital Slepian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
See Also
--------
dpss
References
----------
.. [1] D. Slepian & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-I," Bell Syst. Tech. J., vol.40,
pp.43-63, 1961. https://archive.org/details/bstj40-1-43
.. [2] H. J. Landau & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-II," Bell Syst. Tech. J. , vol.40,
pp.65-83, 1961. https://archive.org/details/bstj40-1-65
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
warnings.warn('slepian is deprecated and will be removed in a future '
'version, use dpss instead', DeprecationWarning)
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
return _truncate(win, needs_trunc)
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
return _truncate(w, needs_trunc)
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
return _truncate(w, needs_trunc)
def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False):
"""
Compute the Discrete Prolate Spheroidal Sequences (DPSS).
DPSS (or Slepian sequences) are often used in multitaper power spectral
density estimation (see [1]_). The first window in the sequence can be
used to maximize the energy concentration in the main lobe, and is also
called the Slepian window.
Parameters
----------
M : int
Window length.
NW : float
Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*N*dt``
where ``dt`` is taken as 1.
Kmax : int | None, optional
Number of DPSS windows to return (orders ``0`` through ``Kmax-1``).
If None (default), return only a single window of shape ``(M,)``
instead of an array of windows of shape ``(Kmax, M)``.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
norm : {2, 'approximate', 'subsample'} | None, optional
If 'approximate' or 'subsample', then the windows are normalized by the
maximum, and a correction scale-factor for even-length windows
is applied either using ``M**2/(M**2+NW)`` ("approximate") or
        an FFT-based subsample shift ("subsample"); see Notes for details.
If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise
(which uses the l2 norm).
return_ratios : bool, optional
If True, also return the concentration ratios in addition to the
windows.
Returns
-------
    v : ndarray, shape (Kmax, M) or (M,)
The DPSS windows. Will be 1D if `Kmax` is None.
r : ndarray, shape (Kmax,) or float, optional
The concentration ratios for the windows. Only returned if
`return_ratios` evaluates to True. Will be 0D if `Kmax` is None.
Notes
-----
This computation uses the tridiagonal eigenvector formulation given
in [2]_.
    In the default window-generation mode (``Kmax=None``), normalizing by the
    l-infinity norm alone would create a window with two unity values for even
    `M` but only one for odd `M`, leading to slight normalization differences
    between even and odd orders. The approximate correction of
    ``M**2/float(M**2+NW)`` for even sample numbers is used to counteract this
    effect (see Examples below).
For very long signals (e.g., 1e6 elements), it can be useful to compute
windows orders of magnitude shorter and use interpolation (e.g.,
`scipy.interpolate.interp1d`) to obtain tapers of length `M`,
but this in general will not preserve orthogonality between the tapers.
.. versionadded:: 1.1
References
----------
    .. [1] Percival DB, Walden AT. Spectral Analysis for Physical Applications:
Multitaper and Conventional Univariate Techniques.
Cambridge University Press; 1993.
.. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
           Volume 57 (1978), 1371-1430.
.. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for
Spectrum Analysis. IEEE Transactions on Acoustics, Speech and
Signal Processing. ASSP-28 (1): 105-107; 1980.
Examples
--------
We can compare the window to `kaiser`, which was invented as an alternative
that was easier to calculate [3]_ (example adapted from
`here <https://ccrma.stanford.edu/~jos/sasp/Kaiser_DPSS_Windows_Compared.html>`_):
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import windows, freqz
>>> N = 51
>>> fig, axes = plt.subplots(3, 2, figsize=(5, 7))
>>> for ai, alpha in enumerate((1, 3, 5)):
... win_dpss = windows.dpss(N, alpha)
... beta = alpha*np.pi
... win_kaiser = windows.kaiser(N, beta)
... for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')):
... win /= win.sum()
... axes[ai, 0].plot(win, color=c, lw=1.)
... axes[ai, 0].set(xlim=[0, N-1], title=r'$\\alpha$ = %s' % alpha,
... ylabel='Amplitude')
... w, h = freqz(win)
... axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.)
... axes[ai, 1].set(xlim=[0, np.pi],
... title=r'$\\beta$ = %0.2f' % beta,
... ylabel='Magnitude (dB)')
>>> for ax in axes.ravel():
... ax.grid(True)
>>> axes[2, 1].legend(['DPSS', 'Kaiser'])
>>> fig.tight_layout()
>>> plt.show()
And here are examples of the first four windows, along with their
concentration ratios:
>>> M = 512
>>> NW = 2.5
>>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True)
>>> fig, ax = plt.subplots(1)
>>> ax.plot(win.T, linewidth=1.)
>>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples',
... title='DPSS, M=%d, NW=%0.1f' % (M, NW))
>>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio)
... for ii, ratio in enumerate(eigvals)])
>>> fig.tight_layout()
>>> plt.show()
Using a standard :math:`l_{\\infty}` norm would produce two unity values
for even `M`, but only one unity value for odd `M`. This produces uneven
window power that can be counteracted by the approximate correction
``M**2/float(M**2+NW)``, which can be selected by using
``norm='approximate'`` (which is the same as ``norm=None`` when
``Kmax=None``, as is the case here). Alternatively, the slower
``norm='subsample'`` can be used, which uses subsample shifting in the
frequency domain (FFT) to compute the correction:
>>> Ms = np.arange(1, 41)
>>> factors = (50, 20, 10, 5, 2.0001)
>>> energy = np.empty((3, len(Ms), len(factors)))
>>> for mi, M in enumerate(Ms):
... for fi, factor in enumerate(factors):
... NW = M / float(factor)
... # Corrected using empirical approximation (default)
... win = windows.dpss(M, NW)
... energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Corrected using subsample shifting
... win = windows.dpss(M, NW, norm='subsample')
... energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Uncorrected (using l-infinity norm)
... win /= win.max()
... energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
>>> fig, ax = plt.subplots(1)
>>> hs = ax.plot(Ms, energy[2], '-o', markersize=4,
... markeredgecolor='none')
>>> leg = [hs[-1]]
>>> for hi, hh in enumerate(hs):
... h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.66)
... h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.33)
... if hi == len(hs) - 1:
... leg.insert(0, h1[0])
... leg.insert(0, h2[0])
>>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$')
>>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$',
... 'Corrected (subsample)'])
>>> fig.tight_layout()
""" # noqa: E501
if _len_guards(M):
return np.ones(M)
if norm is None:
norm = 'approximate' if Kmax is None else 2
known_norms = (2, 'approximate', 'subsample')
if norm not in known_norms:
raise ValueError('norm must be one of %s, got %s'
% (known_norms, norm))
if Kmax is None:
singleton = True
Kmax = 1
else:
singleton = False
Kmax = operator.index(Kmax)
if not 0 < Kmax <= M:
            raise ValueError('Kmax must be greater than 0 '
                             'and less than or equal to M')
if NW >= M/2.:
raise ValueError('NW must be less than M/2.')
if NW <= 0:
raise ValueError('NW must be positive')
M, needs_trunc = _extend(M, sym)
W = float(NW) / M
nidx = np.arange(M)
# Here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see https://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here we set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
e = nidx[1:] * (M - nidx[1:]) / 2.
# only calculate the highest Kmax eigenvalues
w, windows = linalg.eigh_tridiagonal(
d, e, select='i', select_range=(M - Kmax, M - 1))
w = w[::-1]
windows = windows[:, ::-1].T
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
fix_even = (windows[::2].sum(axis=1) < 0)
for i, f in enumerate(fix_even):
if f:
windows[2 * i] *= -1
# * antisymmetric tapers should begin with a positive lobe
# (this depends on the definition of "lobe", here we'll take the first
# point above the numerical noise, which should be good enough for
# sufficiently smooth functions, and more robust than relying on an
# algorithm that uses max(abs(w)), which is susceptible to numerical
# noise problems)
thresh = max(1e-7, 1. / M)
for i, w in enumerate(windows[1::2]):
if w[w * w > thresh][0] < 0:
windows[2 * i + 1] *= -1
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
if return_ratios:
dpss_rxx = _fftautocorr(windows)
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
ratios = np.dot(dpss_rxx, r)
if singleton:
ratios = ratios[0]
# Deal with sym and Kmax=None
if norm != 2:
windows /= windows.max()
if M % 2 == 0:
if norm == 'approximate':
correction = M**2 / float(M**2 + NW)
else:
s = sp_fft.rfft(windows[0])
shift = -(1 - 1./M) * np.arange(1, M//2 + 1)
s[1:] *= 2 * np.exp(-1j * np.pi * shift)
correction = M / s.real.sum()
windows *= correction
# else we're already l2 normed, so do nothing
if needs_trunc:
windows = windows[:, :-1]
if singleton:
windows = windows[0]
return (windows, ratios) if return_ratios else windows
def _fftautocorr(x):
"""Compute the autocorrelation of a real array and crop the result."""
N = x.shape[-1]
use_N = sp_fft.next_fast_len(2*N-1)
x_fft = sp_fft.rfft(x, use_N, axis=-1)
cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]
# Or equivalently (but in most cases slower):
# cxy = np.array([np.convolve(xx, yy[::-1], mode='full')
# for xx, yy in zip(x, x)])[:, N-1:2*N-1]
return cxy
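# Sanity-check sketch (added; not part of the original source): for a single
# row, `_fftautocorr` should match the non-negative lags of the direct
# autocorrelation up to floating-point error:
#
#     x = np.random.rand(1, 16)
#     rxx_fft = _fftautocorr(x)[0]
#     rxx_dir = np.correlate(x[0], x[0], mode='full')[15:]   # lags 0..15
#     # np.allclose(rxx_fft, rxx_dir) is expected to hold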
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window of a given length and type.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True (default), create a "periodic" window, ready to use with
`ifftshift` and be multiplied by the result of an FFT (see also
:func:`~scipy.fft.fftfreq`).
If False, create a "symmetric" window, for use in filter design.
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
- `~scipy.signal.windows.boxcar`
- `~scipy.signal.windows.triang`
- `~scipy.signal.windows.blackman`
- `~scipy.signal.windows.hamming`
- `~scipy.signal.windows.hann`
- `~scipy.signal.windows.bartlett`
- `~scipy.signal.windows.flattop`
- `~scipy.signal.windows.parzen`
- `~scipy.signal.windows.bohman`
- `~scipy.signal.windows.blackmanharris`
- `~scipy.signal.windows.nuttall`
- `~scipy.signal.windows.barthann`
- `~scipy.signal.windows.kaiser` (needs beta)
- `~scipy.signal.windows.gaussian` (needs standard deviation)
- `~scipy.signal.windows.general_gaussian` (needs power, width)
- `~scipy.signal.windows.slepian` (needs width)
- `~scipy.signal.windows.dpss` (needs normalized half-bandwidth)
- `~scipy.signal.windows.chebwin` (needs attenuation)
- `~scipy.signal.windows.exponential` (needs decay scale)
- `~scipy.signal.windows.tukey` (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the `~scipy.signal.windows.kaiser` window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
| bsd-3-clause |
poine/rosmip | rosmip/rosmip_control/scripts/test_cpp_ext.py | 1 | 1667 | #!/usr/bin/env python
import numpy as np, matplotlib.pyplot as plt
import scipy.signal
import pdb
import keras
import homere_control.io_dataset as iod
import cpp_rosmip_control
import ident_plant
'''
run the identified plant (ANN model) in closed loop with the real (legacy) control law
'''
if __name__ == '__main__':
keras.backend.set_floatx('float64')
np.set_printoptions(precision=2, linewidth=300)
ann = ident_plant.ANN()
ann_filename = '/tmp/rosmip_ann.h5'
ann.load(ann_filename)
ann.report()
filename, _type = '/home/poine/work/homere/homere_control/data/rosmip/gazebo/rosmip_io_04_sine_2.npz', 'rosmip'
ds = iod.DataSet(filename, _type)
# test control vs ann
ctl = cpp_rosmip_control.LegacyCtlLaw()
# original
ctl.set_inner_loop_coeffs(1.05, [-4.945, 8.862, -3.967], [1.000, -1.481, 0.4812])
# test
#ctl.set_inner_loop_coeffs(1.05, [-3.945, 8.862, -3.967], [1.000, -1.481, 0.8])
time = np.arange(0, 10, 0.01)
U = np.zeros((len(time), 2))
X = np.zeros((len(time), 6))
Sp = np.zeros((len(time), 2))
Sp[:,0] = scipy.signal.square(time)
#Sp[:,1] = scipy.signal.square(time)
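    # Closed-loop rollout (comment added for clarity, as the code below
    # suggests): at each step the previous ANN state is converted back to
    # left/right wheel angles, the legacy control law computes the wheel
    # commands, and the identified ANN plant predicts the next state from the
    # stacked [state, command] vector.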
for k in range(1, len(time)):
phi, gamma, theta = X[k-1, 0], X[k-1, 1], X[k-1, 4]
rwa_p_lwa = (phi-theta)*2
rwa_m_lwa = gamma/ann.wr*ann.ws
lwa = (rwa_p_lwa - rwa_m_lwa)/2
rwa = (rwa_p_lwa + rwa_m_lwa)/2
U[k-1] = ctl.update(theta, lwa, rwa, Sp[k-1,0], Sp[k-1,1] )
X[k] = ann.predict(np.hstack([X[k-1], U[k-1]])[np.newaxis,:])
U[-1] = U[-2]
pdb.set_trace()
_in = np.hstack([X, U])
ann.plot_io_chronogram(_in, None, time)
#plt.plot(X)
plt.show()
| gpl-3.0 |
grg2rsr/line_scan_traces_extractor | tifffile.py | 2 | 98853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2012, Christoph Gohlke
# Copyright (c) 2008-2012, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
and FluoView files. Only a subset of the TIFF specification is supported,
mainly uncompressed and losslessly compressed 2**(0 to 6) bit integer,
16, 32 and 64-bit float, grayscale and RGB(A) images, which are commonly
used in bio-scientific imaging. Specifically, reading JPEG or CCITT
compressed image data is not implemented. Only primary info records are
read for STK, FluoView, and NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, and OME-TIFF
are custom extensions defined by MetaMorph, Carl Zeiss MicroImaging,
Olympus, and the Open Microscopy Environment consortium respectively.
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
For command line usage run ``python tifffile.py --help``
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2012.06.06
Requirements
------------
* `CPython 2.7 or 3.2 <http://www.python.org>`__
* `Numpy 1.6 <http://numpy.scipy.org>`__
* `Matplotlib 1.1 <http://matplotlib.sourceforge.net>`__
(optional for plotting)
* `tifffile.c 2012.01.01 <http://www.lfd.uci.edu/~gohlke/>`__
(optional for faster decoding of PackBits and LZW encoded strings)
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis, for a bug fix and some read_cz_lsm functions.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
Examples
--------
>>> data = numpy.random.rand(301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> assert numpy.all(image == data)
>>> tif = TIFFfile('test.tif')
>>> images = tif.asarray()
>>> image0 = tif[0].asarray()
>>> for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
... if page.is_rgb: pass
... if page.is_palette:
... t = page.color_map
... if page.is_stk:
... t = page.mm_uic_tags.number_planes
... if page.is_lsm:
... t = page.cz_lsm_info
>>> tif.close()
"""
from __future__ import division, print_function
import sys
import os
import math
import zlib
import time
import struct
import warnings
import datetime
import collections
from xml.etree import cElementTree as ElementTree
import numpy
__all__ = ['imsave', 'imread', 'imshow', 'TIFFfile']
def imsave(filename, data, photometric=None, planarconfig=None,
resolution=None, description=None, software='tifffile.py',
byteorder=None, bigtiff=False):
"""Write image data to TIFF file.
Image data are written uncompressed in one stripe per plane.
Dimensions larger than 2 or 3 (depending on photometric mode and
planar configuration) are flattened and saved as separate pages.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image height,
width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : ((int, int), (int, int))
X and Y resolution in dots per inch as rational numbers.
description : str
The subject of the image. Saved with the first page only.
software : str
Name of the software used to create the image.
Saved with the first page only.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
bigtiff : bool
If True the BigTIFF format is used.
By default the standard TIFF format is used for data less than 2040 MB.
Examples
--------
>>> data = numpy.random.rand(10, 3, 301, 219)
>>> imsave('temp.tif', data)
"""
assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb'))
assert(planarconfig in (None, 'contig', 'planar'))
assert(byteorder in (None, '<', '>'))
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
if not bigtiff and data.size * data.dtype.itemsize < 2040*2**20:
bigtiff = False
offset_size = 4
tag_size = 12
numtag_format = 'H'
offset_format = 'I'
val_format = '4s'
else:
bigtiff = True
offset_size = 8
tag_size = 20
numtag_format = 'Q'
offset_format = 'Q'
val_format = '8s'
# unify shape of data
samplesperpixel = 1
extrasamples = 0
if photometric is None:
if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)):
photometric = 'rgb'
else:
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if planarconfig is None:
planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig'
if planarconfig == 'contig':
if shape[-1] not in (3, 4):
raise ValueError("not a contiguous RGB(A) image")
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
if shape[-3] not in (3, 4):
raise ValueError("not a planar RGB(A) image")
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
if samplesperpixel == 4:
extrasamples = 1
elif planarconfig and len(shape) > 2:
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
data = data.reshape((-1, 1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else lambda x: bytes(x, 'ascii')
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = []
tag_data = []
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def tag(name, dtype, number, value, offset=[0]):
# append tag binary string to tags list
# append (offset, value as binary string) to tag_data list
# increment offset by tag_size
if dtype == 's':
value = bytestr(value) + b'\0'
number = len(value)
value = (value, )
t = [pack('HH', tifftags[name], tifftypes[dtype]),
pack(offset_format, number)]
if len(dtype) > 1:
number *= int(dtype[:-1])
dtype = dtype[-1]
if number == 1:
if isinstance(value, (tuple, list)):
value = value[0]
t.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * number <= offset_size:
t.append(pack(val_format, pack(str(number)+dtype, *value)))
else:
t.append(pack(offset_format, 0))
tag_data.append((offset[0] + offset_size + 4,
pack(str(number)+dtype, *value)))
tags.append(b''.join(t))
offset[0] += tag_size
if software:
tag('software', 's', 0, software)
if description:
tag('image_description', 's', 0, description)
elif shape != data_shape:
tag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)))
tag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"))
# write previous tags only once
writeonce = (len(tags), len(tag_data)) if shape[0] > 1 else None
tag('compression', 'H', 1, 1)
tag('orientation', 'H', 1, 1)
tag('image_width', 'I', 1, shape[-2])
tag('image_length', 'I', 1, shape[-3])
tag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
tag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
tag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
tag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
tag('planar_configuration', 'H', 1, 1 if planarconfig=='contig' else 2)
tag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
tag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
tag('extra_samples', 'H', 1, 1) # alpha channel
else:
tag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
tag('x_resolution', '2I', 1, resolution[0])
tag('y_resolution', '2I', 1, resolution[1])
tag('resolution_unit', 'H', 1, 2)
tag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
tag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
# strip_offsets must be the last tag; will be updated later
tag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
fd = open(filename, 'wb')
seek = fd.seek
tell = fd.tell
def write(arg, *args):
fd.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for i in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write tags
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next ifd
# write extra tag data and update pointers
for off, dat in tag_data:
pos = tell()
seek(tag_offset + off)
write(offset_format, pos)
seek(pos)
write(dat)
# update strip_offsets
pos = tell()
if len(strip_byte_counts) == 1:
seek(ifd_offset - offset_size)
write(offset_format, pos)
else:
seek(pos - offset_size*shape[1])
strip_offset = pos
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
seek(pos)
# write data
data[i].tofile(fd) # if this fails, try update Python and numpy
fd.flush()
# remove tags that should be written only once
if writeonce:
tags = tags[writeonce[0]:]
d = writeonce[0] * tag_size
tag_data = [(o-d, v) for (o, v) in tag_data[writeonce[1]:]]
writeonce = None
fd.close()
def imread(filename, *args, **kwargs):
"""Return image data from TIFF file as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
Examples
--------
>>> image = imread('test.tif', 0)
"""
with TIFFfile(filename) as tif:
return tif.asarray(*args, **kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
result = self.func(instance)
if result is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, result)
return result
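# Usage sketch (added; not part of the original module): `lazyattr` defines
# only __get__ and then stores the computed value on the instance, so the
# wrapped function runs at most once per instance and later accesses hit the
# cached attribute directly:
#
#     class _Demo(object):
#         @lazyattr
#         def answer(self):
#             print("computing")
#             return 42
#
#     d = _Demo()
#     d.answer   # prints "computing", returns 42
#     d.answer   # returns the cached 42 without recomputing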
class TIFFfile(object):
"""Read image and meta-data from TIFF, STK, LSM, and FluoView files.
TIFFfile instances must be closed using the close method.
Attributes
----------
pages : list
All TIFFpages in file.
series : list of Records(shape, dtype, axes, TIFFpages)
TIFF pages with compatible shapes and types.
All attributes are read-only.
Examples
--------
>>> tif = TIFFfile('test.tif')
... try:
... images = tif.asarray()
... except Exception as e:
... print(e)
... finally:
... tif.close()
"""
def __init__(self, filename):
"""Initialize instance from file."""
filename = os.path.abspath(filename)
self._fd = open(filename, 'rb')
self.fname = os.path.basename(filename)
self.fpath = os.path.dirname(filename)
self._tiffs = {self.fname: self} # cache of TIFFfiles
self.offset_size = None
self.pages = []
try:
self._fromfile()
except Exception:
self._fd.close()
raise
def close(self):
"""Close open file handle(s)."""
        if not hasattr(self, '_tiffs'):  # instance was not fully initialized
return
for tif in self._tiffs.values():
if tif._fd:
tif._fd.close()
tif._fd = None
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fd.seek(0)
try:
self.byte_order = {b'II': '<', b'MM': '>'}[self._fd.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byte_order+'H', self._fd.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byte_order+'HH',
self._fd.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TIFFpage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
@lazyattr
def series(self):
"""Return series of TIFFpage with compatible shape and properties."""
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(self.pages[0].mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'O')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_lsm:
lsmi = self.pages[0].cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if self.pages[0].is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + list(pages[0].shape)
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_nih:
series = [Record(pages=self.pages,
shape=(len(self.pages),) + self.pages[0].shape,
axes='I' + self.pages[0].axes,
dtype=numpy.dtype(self.pages[0].dtype))]
elif self.pages[0].is_shaped:
shape = self.pages[0].tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='O' * len(shape),
dtype=numpy.dtype(self.pages[0].dtype))]
else:
shapes = []
pages = {}
for page in self.pages:
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if not shape in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
return series
def asarray(self, key=None, series=None):
"""Return image data of multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError('key must be an int, slice, or sequence')
if len(pages) == 1:
return pages[0].asarray()
elif self.is_nih:
            result = numpy.vstack([p.asarray(colormapped=False, squeeze=False)
                                   for p in pages])
if pages[0].is_palette:
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
if self.is_ome and any(p is None for p in pages):
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray())
            result = numpy.vstack([p.asarray() if p else nopage
                                   for p in pages])
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF files."""
root = ElementTree.XML(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._tiffs = {uuid: self}
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("not an OME-TIFF master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
axes = "".join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = numpy.prod(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
idx = numpy.ravel_multi_index(idx, shape[:-2])
for uuid in data:
if uuid.tag.endswith('UUID'):
if uuid.text not in self._tiffs:
fn = uuid.attrib['FileName']
try:
tf = TIFFfile(os.path.join(self.fpath, fn))
except (IOError, ValueError):
warnings.warn("failed to read %s" % fn)
break
self._tiffs[uuid.text] = tf
pages = self._tiffs[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(ifds[0].dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [self.fname.capitalize(),
"%.2f MB" % (self.fstat[6] / 1048576),
{'<': 'little endian', '>': 'big endian'}[self.byte_order]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._tiffs) > 1:
result.append("%i files" % (len(self._tiffs)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
@lazyattr
def fstat(self):
return os.fstat(self._fd.fileno())
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TIFFpage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'P' plane, 'I' image series,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'F' phase, 'H' lifetime,
'L' exposure, 'V' event, 'O' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table if exists.
mm_uic_tags: Record(dict)
Consolidated MetaMorph mm_uic# tags, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
All attributes are read-only.
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fd = self.parent._fd
byte_order = self.parent.byte_order
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byte_order + fmt, fd.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fd.seek(offset, 0)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byte_order + fmt, fd.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
for _ in range(numtags):
tag = TIFFtag(self.parent)
tags[tag.name] = tag
# read custom tags
for name, readtag in CUSTOM_TAGS.values():
if name in tags and readtag:
pos = fd.tell()
value = readtag(fd, byte_order, tags[name])
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
tags[name].value = value
fd.seek(pos)
# read LSM info subrecords
if self.is_lsm:
pos = fd.tell()
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info["offset_"+name]
except KeyError:
continue
if not offset:
continue
fd.seek(offset)
try:
setattr(self, "cz_lsm_"+name, reader(fd, byte_order))
except ValueError:
pass
fd.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TIFFtag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(validate[value]
for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
self.strips_per_image = int(math.floor(float(self.image_length +
self.rows_per_strip - 1) / self.rows_per_strip))
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if self.is_stk:
planes = tags['mm_uic2'].count
# consolidate mm_uci tags
self.mm_uic_tags = Record(tags['mm_uic2'].value)
for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'):
if key in tags:
self.mm_uic_tags.update(tags[key].value)
if self.planar_configuration == 'contig':
self._shape = (planes, 1, self.image_length,
self.image_width, self.samples_per_pixel)
self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4))
self.axes = "PYXS"
else:
self._shape = (planes, self.samples_per_pixel,
self.image_length, self.image_width, 1)
self.shape = self._shape[:4]
self.axes = "PSYX"
elif self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
self._shape = (1, 1, self.image_length, self.image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
self.shape = (3, self.image_length, self.image_width)
self.axes = "SYX"
else:
# LSM and FluoView
self.shape = (self.image_length, self.image_width)
self.axes = "YX"
elif self.is_rgb or self.samples_per_pixel > 1:
if self.planar_configuration == 'contig':
self._shape = (1, 1, self.image_length, self.image_width,
self.samples_per_pixel)
self.shape = (self.image_length, self.image_width,
self.samples_per_pixel)
self.axes = "YXS"
else:
self._shape = (1, self.samples_per_pixel, self.image_length,
self.image_width, 1)
self.shape = self._shape[1:-1]
self.axes = "SYX"
if self.is_rgb and 'extra_samples' in self.tags:
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha'):
if self.planar_configuration == 'contig':
self.shape = self.shape[:2] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, self.image_length, self.image_width, 1)
self.shape = self._shape[2:4]
self.axes = "YX"
if not self.compression and not 'strip_byte_counts' in tags:
self.strip_byte_counts = numpy.prod(self.shape) * (
self.bits_per_sample // 8)
def asarray(self, squeeze=True, colormapped=True, rgbonly=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any argument is False, the shape of the returned array might be
different from the page shape.
Parameters
----------
squeeze : bool
If True all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True color mapping is applied for palette-indexed images.
rgbonly : bool
If True return RGB(A) image without additional extra samples.
"""
fd = self.parent._fd
if not fd:
raise IOError("TIFF file is not open")
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
if ('ycbcr_subsampling' in self.tags and
self.tags['ycbcr_subsampling'].value not in (1, (1, 1))):
raise ValueError("YCbCr subsampling not supported")
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
typecode = self.parent.byte_order + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
shape = shape[:-3] + (tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
try:
offsets[0]
except TypeError:
offsets = (offsets, )
byte_counts = (byte_counts, )
if any(o < 2 for o in offsets):
raise ValueError("corrupted file")
if (not self.is_tiled and (self.is_stk or (not self.compression
and bits_per_sample in (8, 16, 32, 64)
and all(offsets[i] == offsets[i+1] - byte_counts[i]
for i in range(len(offsets)-1))))):
# contiguous data
fd.seek(offsets[0], 0)
result = numpy.fromfile(fd, typecode, numpy.prod(shape))
result = result.astype('=' + dtype)
else:
if self.planar_configuration == 'contig':
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
unpack = lambda x: numpy.fromstring(x, typecode)
elif isinstance(bits_per_sample, tuple):
unpack = lambda x: unpackrgb(x, typecode, bits_per_sample)
else:
unpack = lambda x: unpackints(x, typecode, bits_per_sample,
runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, pl = 0, 0, 0
for offset, bytecount in zip(offsets, byte_counts):
fd.seek(offset, 0)
tile = unpack(decompress(fd.read(bytecount)))
tile.shape = tile_shape
result[0, pl, tl:tl+tile_length,
tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[-2]:
tw, tl = 0, tl + tile_length
if tl >= shape[-3]:
tl, pl = 0, pl + 1
result = result[..., :image_length, :image_width, :]
else:
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fd.seek(offset, 0)
stripe = unpack(decompress(fd.read(bytecount)))
size = min(result.size, stripe.size)
result[index:index+size] = stripe[:size]
del stripe
index += size
result.shape = self._shape
if self.predictor == 'horizontal':
# workaround bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=3, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map, result, axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha'):
if self.planar_configuration == 'contig':
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.planar_configuration == 'contig':
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
pass
return result
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric,
self.compression if self.compression else 'raw',
','.join(t[3:] for t in ('is_stk', 'is_lsm', 'is_nih', 'is_ome',
'is_fluoview', 'is_reduced', 'is_tiled')
if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return self.tags['photometric'].value == 2
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image."""
return self.tags['photometric'].value == 3
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_stk(self):
"""True if page contains MM_UIC2 tag."""
return 'mm_uic2' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
class TIFFtag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data. For codes in CUSTOM_TAGS the 4 bytes file content.
value_offset : int
Location of value in file
All attributes are read-only.
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset')
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fd'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
def _fromfile(self, parent):
"""Read tag structure from open file. Advances file cursor."""
fd = parent._fd
byte_order = parent.byte_order
self._offset = fd.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fd.read(size)
code, dtype = struct.unpack(byte_order + fmt[:2], data[:4])
count, value = struct.unpack(byte_order + fmt[2:], data[4:])
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[dtype]
except KeyError:
raise ValueError("unknown TIFF tag data type %i" % dtype)
if not code in CUSTOM_TAGS:
fmt = '%s%i%s' % (byte_order, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size <= parent.offset_size:
value = struct.unpack(fmt, value[:size])
else:
pos = fd.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = struct.unpack(byte_order+tof, value)[0]
fd.seek(self.value_offset)
value = struct.unpack(fmt, fd.read(size))
fd.seek(pos)
if len(value) == 1:
value = value[0]
if dtype == '1s':
value = stripnull(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except TypeError:
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
if k.startswith('_'):
continue
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TIFFpage):
v = [i.index for i in v if i]
s.append(("* %s: %s" % (k, str(v))).split("\n",
1)[0][:PRINT_LINE_LEN])
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
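# Usage sketch (added; not part of the original module): Record is a plain
# dict whose keys are also reachable as attributes:
#
#     r = Record(width=512, height=256)
#     r.width        # 512
#     r['height']    # 256
#     r.depth = 1    # equivalent to r['depth'] = 1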
class TiffTags(Record):
"""Dictionary of TIFFtags with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
#sortbycode = lambda a, b: cmp(a.code, b.code)
#for tag in sorted(self.values(), sortbycode):
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode,
str(tag.value).split('\n', 1)[0])
s.append(line[:PRINT_LINE_LEN])
return '\n'.join(s)
def read_nih_image_header(fd, byte_order, tag):
"""Read NIH_IMAGE_HEADER tag from file and return as dictionary."""
fd.seek(12 + struct.unpack(byte_order+'I', tag.value)[0])
return {'version': struct.unpack(byte_order+'H', fd.read(2))[0]}
def read_mm_header(fd, byte_order, tag):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
return numpy.rec.fromfile(fd, MM_HEADER, 1, byteorder=byte_order)[0]
def read_mm_stamp(fd, byte_order, tag):
"""Read MM_STAMP tag from file and return as numpy.array."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
return numpy.fromfile(fd, byte_order+'8f8', 1)[0]
def read_mm_uic1(fd, byte_order, tag):
"""Read MM_UIC1 tag from file and return as dictionary."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
t = fd.read(8*tag.count)
t = struct.unpack('%s%iI' % (byte_order, 2*tag.count), t)
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_mm_uic2(fd, byte_order, tag):
"""Read MM_UIC2 tag from file and return as dictionary."""
result = {'number_planes': tag.count}
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
values = numpy.fromfile(fd, byte_order+'I', 6*tag.count)
result['z_distance'] = values[0::6] // values[1::6]
#result['date_created'] = tuple(values[2::6])
#result['time_created'] = tuple(values[3::6])
#result['date_modified'] = tuple(values[4::6])
#result['time_modified'] = tuple(values[5::6])
return result
def read_mm_uic3(fd, byte_order, tag):
"""Read MM_UIC3 tag from file and return as dictionary."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
t = numpy.fromfile(fd, '%sI' % byte_order, 2*tag.count)
return {'wavelengths': t[0::2] // t[1::2]}
def read_mm_uic4(fd, byte_order, tag):
"""Read MM_UIC4 tag from file and return as dictionary."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
t = struct.unpack(byte_order + 'hI'*tag.count, fd.read(6*tag.count))
return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2])
if k in MM_TAG_IDS)
def read_cz_lsm_info(fd, byte_order, tag):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
fd.seek(struct.unpack(byte_order+'I', tag.value)[0])
result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1,
byteorder=byte_order)[0]
{50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation
return result
def read_cz_lsm_time_stamps(fd, byte_order):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack(byte_order+'II', fd.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
return struct.unpack(('%s%dd' % (byte_order, count)),
fd.read(8*count))
def read_cz_lsm_event_list(fd, byte_order):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack(byte_order+'II', fd.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack(byte_order+'IdI', fd.read(16))
etext = stripnull(fd.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fd, byte_order):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack(byte_order+"I", fd.read(4))[0]:
raise ValueError("not a lsm_scan_info structure")
fd.read(8)
while True:
entry, dtype, size = unpack(byte_order+"III", fd.read(12))
if dtype == 2:
value = stripnull(fd.read(size))
elif dtype == 4:
value = unpack(byte_order+"i", fd.read(4))[0]
elif dtype == 5:
value = unpack(byte_order+"d", fd.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
block = blocks.pop()
else:
setattr(block, "unknown_%x" % entry, value)
if not blocks:
break
return block
def _replace_by(module_function, warn=True):
"""Try replace decorated function by module.function."""
def decorate(func, module_function=module_function, warn=warn):
sys.path.append(os.path.dirname(__file__))
try:
module, function = module_function.split('.')
func, oldfunc = getattr(__import__(module), function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
sys.path.pop()
return func
return decorate
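# Sketch of the fallback behaviour of _replace_by. The module name below is
# deliberately made up, so the import fails and the pure Python function is
# kept unchanged (warn=False suppresses the warning). Call by hand to verify.
def _replace_by_example():
    @_replace_by('_no_such_module.fast_identity', warn=False)
    def fast_identity(x):
        return x
    return fast_identity(42) == 42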
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result.extend(encoded[i:i+n])
i += n
elif n > 129:
result.extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
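# Hand-built sanity check for decodepackbits (bytes chosen by hand, not taken
# from a real TIFF strip): a literal run followed by a repeat run.
def _packbits_example():
    literal = b'\x02abc'   # header 2 (n=3) -> copy the next 3 bytes verbatim
    repeat = b'\xfeA'      # header 254 (n=255) -> repeat b'A' 258-255 = 3 times
    assert decodepackbits(literal) == b'abc'
    assert decodepackbits(repeat) == b'AAA'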
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code = code << (bitcount % 8)
code = code & mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len(encoded) < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = oldcode = 0
result = []
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result.append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result.append(decoded)
table.append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
raise ValueError("unexpected end of stream (code %i)" % code)
return b''.join(result)
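# Hand-packed strip for decodelzw, constructed for illustration only: the
# codes CLEAR(256), literal 'A'(65) and EOI(257) packed MSB-first at 9 bits
# per code give the four bytes below.
def _lzw_example():
    strip = b'\x80\x10\x60\x20'
    assert decodelzw(strip) == b'A'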
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen+(8-runlen%8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code = code << (bitcount % 8)
code = code & bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
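# Small check for unpackints with hand-picked input: two bytes holding four
# 4-bit unsigned integers 1, 2, 3, 4.
def _unpackints_example():
    values = unpackints(b'\x12\x34', 'B', itemsize=4)
    assert values.tolist() == [1, 2, 3, 4]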
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing tightly packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
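# Sketch of reorient on a tiny made-up 1x2x2x1 array: 'top_right' mirrors the
# image along the width axis (axis -2).
def _reorient_example():
    img = numpy.arange(4).reshape(1, 2, 2, 1)
    flipped = reorient(img, 'top_right')
    assert flipped[0, 0, 0, 0] == img[0, 0, 1, 0]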
def stripnull(string):
"""Return string truncated at first null character."""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Examples
--------
>>> datetime_from_timestamp(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(n)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory. Print error message on failure.
Examples
--------
>>> test_tifffile(verbose=False)
"""
import glob
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TIFFfile(f)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (str(tif), str(img.shape),
img.dtype, tif[0].compression, (time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated',
6: 'cielab',
7: 'icclab',
8: 'itulab',
32844: 'logl',
32845: 'logluv'}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000'}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1B', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q'} # IFD8 unsigned 8 byte IFD offset (BigTiff)
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex'}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B'}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom'}
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample',
'P': 'plane',
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'F': 'phase',
'R': 'tile', # region
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'O': 'other'}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# MetaMorph STK tags
MM_TAG_IDS = {
0: 'auto_scale',
1: 'min_scale',
2: 'max_scale',
3: 'spatial_calibration',
#4: 'x_calibration',
#5: 'y_calibration',
#6: 'calibration_units',
#7: 'name',
8: 'thresh_state',
9: 'thresh_state_red',
11: 'thresh_state_green',
12: 'thresh_state_blue',
13: 'thresh_state_lo',
14: 'thresh_state_hi',
15: 'zoom',
#16: 'create_time',
#17: 'last_saved_time',
18: 'current_buffer',
19: 'gray_fit',
20: 'gray_point_count',
#21: 'gray_x',
#22: 'gray_y',
#23: 'gray_min',
#24: 'gray_max',
#25: 'gray_unit_name',
26: 'standard_lut',
27: 'wavelength',
#28: 'stage_position',
#29: 'camera_chip_offset',
#30: 'overlay_mask',
#31: 'overlay_compress',
#32: 'overlay',
#33: 'special_overlay_mask',
#34: 'special_overlay_compress',
#35: 'special_overlay',
36: 'image_property',
#37: 'stage_label',
#38: 'autoscale_lo_info',
#39: 'autoscale_hi_info',
#40: 'absolute_z',
#41: 'absolute_z_valid',
#42: 'gamma',
#43: 'gamma_red',
#44: 'gamma_green',
#45: 'gamma_blue',
#46: 'camera_bin',
47: 'new_lut',
#48: 'image_property_ex',
49: 'plane_property',
#50: 'user_lut_table',
51: 'red_autoscale_info',
#52: 'red_autoscale_lo_info',
#53: 'red_autoscale_hi_info',
54: 'red_minscale_info',
55: 'red_maxscale_info',
56: 'green_autoscale_info',
#57: 'green_autoscale_lo_info',
#58: 'green_autoscale_hi_info',
59: 'green_minscale_info',
60: 'green_maxscale_info',
61: 'blue_autoscale_info',
#62: 'blue_autoscale_lo_info',
#63: 'blue_autoscale_hi_info',
64: 'blue_min_scale_info',
65: 'blue_max_scale_info'}
#66: 'overlay_plane_color',
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64')]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4')]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'i4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('dimension_data_type', 'i4'),
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('data_type', 'u4'),
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_information', 'u4'),
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4')]
# Import functions for LSM_INFO subrecords
CZ_LSM_INFO_READERS = {
'scan_information': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT'} # point mode
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time'}
# Descriptions of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
2: '12 bit unsigned integer',
5: '32 bit float'}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detectionchannels",
0x80000000: "illuminationchannels",
0xa0000000: "beamsplitters",
0xc0000000: "datachannels",
0x13000000: "markers",
0x11000000: "timers"}
CZ_LSM_SCAN_INFO_STRUCTS = {
0x40000000: "tracks",
0x50000000: "lasers",
0x70000000: "detectionchannels",
0x90000000: "illuminationchannels",
0xb0000000: "beamsplitters",
0xd0000000: "datachannels",
0x14000000: "markers",
0x12000000: "timers"}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "oledb_recording_scan_type",
0x10000008: "oledb_recording_scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bccorrection",
0x10000049: "position_bccorrection1",
0x10000050: "position_bccorrection2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
# lasers
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# tracks
0x40000001: "multiplex_type",
0x40000002: "multiplex_order",
0x40000003: "sampling_mode",
0x40000004: "sampling_method",
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# detection_channels
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "detection_channel_name",
0x70000015: "detection_detector_gain_bc1",
0x70000016: "detection_detector_gain_bc2",
0x70000017: "detection_amplifier_gain_bc1",
0x70000018: "detection_amplifier_gain_bc2",
0x70000019: "detection_amplifier_offset_bc1",
0x70000020: "detection_amplifier_offset_bc2",
0x70000021: "detection_spectral_scan_channels",
0x70000022: "detection_spi_wavelength_start",
0x70000023: "detection_spi_wavelength_stop",
0x70000026: "detection_dye_name",
0x70000027: "detection_dye_folder",
# illumination_channels
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitters
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channels
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# markers
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
# timers
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number"}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
#280: ('min_sample_value', 0, 3, None, None),
#281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
#700: ('xmp', None, 1, None, None),
33432: ('copyright', None, 1, None, None),
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
34665: ('exif_ifd', None, 4, 1, None)}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
33628: ('mm_uic1', read_mm_uic1),
33629: ('mm_uic2', read_mm_uic2),
33630: ('mm_uic3', read_mm_uic3),
33631: ('mm_uic4', read_mm_uic4),
34361: ('mm_header', read_mm_header),
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', None),
34412: ('cz_lsm_info', read_cz_lsm_info),
43314: ('nih_image_header', read_nih_image_header)}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=4096, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
figure : matplotlib.figure.Figure (optional).
        Matplotlib figure to use for plotting.
subplot : int
A matplotlib.pyplot.subplot axis.
maxdim : int
maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if (isrgb and data.shape[-3] in (3, 4)):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif (not isrgb and data.shape[-1] in (3, 4)):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette':
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not isrgb or bitspersample is None:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
if vmax is None:
vmax = datamax
if vmin is None:
if data.dtype.kind != 'f':
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
pyplot.title(title, size=11)
if cmap is None:
if photometric == 'miniswhite':
cmap = 'gray_r' if vmin == 0 else 'coolwarm_r'
else:
cmap = 'gray' if vmin == 0 else 'coolwarm'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
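# Usage sketch for imshow. 'example.tif' is a placeholder path, and a working
# matplotlib GUI backend is assumed; call by hand from an interactive session.
def _imshow_example(path='example.tif'):
    from matplotlib import pyplot
    tif = TIFFfile(path)
    try:
        img = tif.asarray()
        imshow(img, title=path, photometric=tif[0].photometric)
    finally:
        tif.close()
    pyplot.show()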
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import re
import optparse
search_doc = lambda r, d: re.search(r, __doc__).group(1) if __doc__ else d
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description=search_doc("\n\n([^|]*?)\n\n", ''),
version="%%prog %s" % search_doc(":Version: (.*)", "Unknown"))
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the internal tests")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TIFFfile(path)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
notnone = lambda x: next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_information',
'mm_uic_tags', 'mm_header', 'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if page.is_stk:
try:
vmin = page.mm_uic_tags['min_scale']
vmax = page.mm_uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
__version__ = '2012.06.06'
__docformat__ = 'restructuredtext en'
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
jzt5132/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
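# Quick illustration of the helper above (define-only, arbitrary sizes): the
# returned design matrix is a CSC sparse matrix with a matching target shape.
def _make_sparse_data_example():
    X, y = make_sparse_data(n_samples=20, n_features=30, n_informative=5)
    assert sp.isspmatrix_csc(X)
    assert X.shape == (20, 30) and y.shape == (20,)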
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
csieg/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
| gpl-3.0 |
MostafaGazar/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 4 | 21995 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = tf.contrib.learn.datasets.load_boston()
features = tf.reshape(tf.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
if num_epochs:
features = tf.train.limit_epochs(features, num_epochs=num_epochs)
target = tf.reshape(tf.constant(boston.target), [-1, 1])
return features, target
def iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, _IRIS_INPUT_DIM])
target = tf.reshape(tf.constant(iris.target), [-1])
return features, target
def boston_eval_fn():
boston = tf.contrib.learn.datasets.load_boston()
n_examples = len(boston.target)
features = tf.reshape(
tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
target = tf.reshape(tf.constant(boston.target), [n_examples, 1])
return tf.concat(0, [features, features]), tf.concat(0, [target, target])
def linear_model_params_fn(features, target, mode, params):
assert mode in ('train', 'eval', 'infer')
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, target, mode):
assert mode in ('train', 'eval', 'infer')
prediction, loss = (
tf.contrib.learn.models.linear_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_no_mode_fn(features, target):
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = (
tf.contrib.learn.models.logistic_regression_zero_init(features, target)
)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
class CheckCallsMonitor(tf.contrib.learn.monitors.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(tf.test.TestCase):
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = tf.get_default_graph().seed
return tf.constant([[1.]]), tf.constant([1.])
config = tf.contrib.learn.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_targets = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_targets(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7., 8.], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_targets = np.ones(shape=[7., 10.], dtype=np.float32)
wrong_size_targets = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_targets(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_targets, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_targets, steps=1)
def testBadInput(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(ValueError,
'Either x or input_fn must be provided.',
est.fit, x=None, input_fn=None)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, x='X', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and x or y',
est.fit, y='Y', input_fn=iris_input_fn)
self.assertRaisesRegexp(ValueError,
'Can not provide both input_fn and batch_size',
est.fit, input_fn=iris_input_fn, batch_size=100)
self.assertRaisesRegexp(
ValueError, 'Inputs cannot be tensors. Please provide input_fn.',
est.fit, x=tf.constant(1.))
def testUntrained(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
with self.assertRaises(tf.contrib.learn.NotFittedError):
_ = est.evaluate(
x=boston.data,
y=boston.target.astype(np.float64))
with self.assertRaises(tf.contrib.learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = tf.contrib.learn.datasets.load_boston()
output_dir = tempfile.mkdtemp()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=50)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = tf.contrib.learn.Estimator(model_fn=linear_model_fn,
model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'],
scores['MSE'])
predictions = est2.predict(x=boston.data)
other_score = _sklearn.mean_squared_error(predictions, float64_target)
self.assertAllClose(other_score, scores['MSE'])
# Check we can keep training.
est2.fit(x=boston.data, y=float64_target, steps=100)
scores3 = est2.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_params_fn,
params={'learning_rate': 0.01})
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = tf.contrib.learn.datasets.load_boston()
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
float64_target = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_target, steps=100)
scores = est.evaluate(
x=boston.data,
y=float64_target,
metrics={'MSE': tf.contrib.metrics.streaming_mean_squared_error})
predictions = est.predict(x=boston.data)
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): tf.contrib.metrics.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])
self.assertEqual(predictions['class'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class['class'])
self.assertAllClose(predictions['class'], np.argmax(predictions['prob'],
axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)['class']
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testIrisIterator(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = est.predict(x=iris.data)['class']
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testTrainInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(boston.data)
self.assertEqual(output.shape[0], boston.target.shape[0])
def testPredictInputFn(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = est.predict(input_fn=boston_input_fn)
self.assertEqual(output.shape[0], boston.target.shape[0])
def testPredictAsIterable(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
self.assertEqual(
len(list(est.predict(boston.data, batch_size=10, as_iterable=True))),
boston.target.shape[0])
def testPredictInputFnAsIterable(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
boston = tf.contrib.learn.datasets.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
self.assertEqual(
len(list(est.predict(input_fn=input_fn, as_iterable=True))),
boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {'other': tf.constant([0, 0, 0])}, tf.constant([0, 0, 0])
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitors(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testSummaryWriting(self):
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = tf.contrib.testing.simple_values_from_events(
tf.contrib.testing.latest_events(est.model_dir), ['loss'])
self.assertEqual(len(loss_summary), 1)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with tf.test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = tf.contrib.learn.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(actual, expected)
class InferRealValuedColumnsTest(tf.test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
tf.contrib.learn.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
tf.contrib.learn.infer_real_valued_columns_from_input(tf.constant(1.0))
def _assert_single_feature_column(
self, expected_shape, expected_dtype, feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual({
'': tf.FixedLenFeature(shape=expected_shape, dtype=expected_dtype)
}, feature_column.config)
def testInt32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int32), None))
self._assert_single_feature_column([8], tf.int32, feature_columns)
def testInt64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.int64), None))
self._assert_single_feature_column([8], tf.int64, feature_columns)
def testFloat32Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float32), None))
self._assert_single_feature_column([8], tf.float32, feature_columns)
def testFloat64Input(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
np.ones(shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.ones(shape=[7, 8], dtype=tf.float64), None))
self._assert_single_feature_column([8], tf.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (tf.constant(False, shape=[7, 8], dtype=tf.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
tf.contrib.learn.infer_real_valued_columns_from_input_fn(
lambda: (
tf.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column(
[_BOSTON_INPUT_DIM], tf.float64, feature_columns)
def testIrisInputFn(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column(
[_IRIS_INPUT_DIM], tf.float64, feature_columns)
class ReplicaDeviceSetterTest(tf.test.TestCase):
def testVariablesAreOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=1))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with tf.device(estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(num_ps_replicas=0))):
default_val = tf.constant([-1, -1], tf.int64)
table = tf.contrib.lookup.MutableHashTable(tf.string,
tf.int64,
default_val)
input_string = tf.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
with tf.device(
estimator._get_replica_device_setter(
tf.contrib.learn.RunConfig(
num_ps_replicas=1, job_name='worker', task=3))):
v = tf.Variable([1, 2])
w = tf.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
maxis1314/pyutils | ml/svm/run.py | 1 | 2132 | # -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
from sklearn import svm
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
data = []
labels = []
with open("1.txt") as ifile:
for line in ifile:
tokens = line.strip().split(' ')
data.append([float(tk) for tk in tokens[:-1]])
labels.append(tokens[-1])
x = np.array(data)
labels = np.array(labels)
y = np.zeros(labels.shape)
y[labels=='fat']=1
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.0)
h = .02
# create a mesh to plot in
x_min, x_max = x_train[:, 0].min() - 0.1, x_train[:, 0].max() + 0.1
y_min, y_max = x_train[:, 1].min() - 1, x_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
''' SVM '''
# title for the plots
titles = ['LinearSVC (linear kernel)',
'SVC with polynomial (degree 3) kernel',
'SVC with RBF kernel',
'SVC with Sigmoid kernel']
clf_linear = svm.SVC(kernel='linear').fit(x, y)
#clf_linear = svm.LinearSVC().fit(x, y)
clf_poly = svm.SVC(kernel='poly', degree=3).fit(x, y)
clf_rbf = svm.SVC().fit(x, y)
clf_sigmoid = svm.SVC(kernel='sigmoid').fit(x, y)
for i, clf in enumerate((clf_linear, clf_poly, clf_rbf, clf_sigmoid)):
answer = clf.predict(np.c_[xx.ravel(), yy.ravel()])
print(clf)
    print(np.mean(clf.predict(x_train) == y_train))  # training accuracy; `answer` covers the mesh grid, not x_train
print(answer)
print(y_train)
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
# Put the result into a color plot
z = answer.reshape(xx.shape)
plt.contourf(xx, yy, z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(x_train[:, 0], x_train[:, 1], c=y_train, cmap=plt.cm.Paired)
plt.xlabel(u'Height')
plt.ylabel(u'Weight')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show() | apache-2.0 |
Ylannl/masb2d | shrinkhistapp.py | 1 | 2853 | import matplotlib
# matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backend_bases import key_press_handler
from Tkinter import *
import numpy as np
from numpy.linalg import norm
from algebra import cos_angle, compute_radius
from math import acos, pi
class ShrinkHistApp(Toplevel):
def __init__(self, master):
Toplevel.__init__(self)
self.sizex = 570
self.sizey = 500
# position window next to main window
self.geometry('{0}x{1}+{2}+{3}'.format(self.sizex, self.sizey, master.sizex+20, 700))
self.master = master
self.minsize(self.sizex, self.sizey)
# self.resizable(0,0)
f = plt.figure()
self.f = f
self.ax = f.add_subplot(111)
self.plotline_a, = self.ax.plot([1,8],[0,180], label='separation angle')
self.plotline_b, = self.ax.twinx().plot([1,8],[0,1], label='radius', color='red')
self.plotline_c, = self.ax.twinx().plot([1,8],[0,1], label='lambda', color='green')
self.ax.set_xlabel('iteration #')
# self.ax.set_ylabel('separation angle (deg)')
plt.legend([self.plotline_a, self.plotline_b, self.plotline_c], ['angle (deg)', 'radius (wrt initial)', 'lambda (wrt initial)'],loc=1)
self.ax.xaxis.grid(True)
self.ax.yaxis.grid(True)
self.canvas = FigureCanvasTkAgg(f, master=self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
self.bind('d', self.save_to_disk)
self.bind('q', master.exit)
def save_to_disk(self, event):
self.f.savefig('shrinkhist.pdf', format='pdf')
def update_plot(self, p_i, inner):
q_indices = self.master.ma.D['ma_shrinkhist_'+inner][p_i]
if len(q_indices) == 0: return # perhaps also clear the plot...
q_coords = self.master.ma.D['coords'][q_indices]
p_n = self.master.ma.D['normals'][p_i]
# if not is_inner: p_n = -p_n
p = self.master.ma.D['coords'][p_i]
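        # Reading of the shrinking-ball history computed below (terminology
        # follows the plot labels set up in __init__):
        #   radius: radius of the ball tangent to p (normal p_n) passing through q
        #   center: ball center, found by stepping from p along -p_n by the radius
        #   theta:  separation angle at the center between p and q, in degrees
        #   lambda: Euclidean distance between p and q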
radii = [ compute_radius(p,p_n,q) for q in q_coords ]
centers = [ p - p_n * r for r in radii ]
thetas = [ acos(cos_angle(p-c,q-c))*(180/pi) for c, q in zip(centers, q_coords) ]
lambdas = [ norm(p-q) for q in q_coords ]
r_initial = radii[0]
radii_proportional = [r/r_initial for r in radii]
lambda_proportional = [l/lambdas[0] for l in lambdas]
self.plotline_a.set_xdata(range(1,1+len(thetas)))
self.plotline_b.set_xdata(range(1,1+len(thetas)))
self.plotline_c.set_xdata(range(1,1+len(thetas)))
self.plotline_a.set_ydata(thetas)
self.plotline_b.set_ydata(radii_proportional)
self.plotline_c.set_ydata(lambda_proportional)
self.canvas.draw() | gpl-3.0 |
flo-compbio/gopca | docs/source/conf.py | 1 | 13599 | # -*- coding: utf-8 -*-
#
# GO-PCA documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 11 11:35:24 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from builtins import str as text
#from builtins import str
import pkg_resources
#import sphinx_rtd_theme
import sphinx_bootstrap_theme
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
#MOCK_MODULES = ['cython','numpy','pandas','scipy','genometools','goparser','xlmhg','sklearn','sklearn.decomposition']
MOCK_MODULES = [
'cython',
'scipy', 'scipy.stats', 'scipy.io',
'scipy.spatial', 'scipy.spatial.distance',
'scipy.cluster', 'scipy.cluster.hierarchy',
'sklearn', 'sklearn.decomposition',
'plotly', 'plotly.graph_objs',
#'numpy',
#'pandas',
#'genometools',
#'genometools.basic',
#'genometools.expression',
#'genometools.expression.visualize',
#'genometools.enrichment', 'genometools.ontology',
#'xlmhg'
]
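# Heavy scientific dependencies are replaced with mocks so that Sphinx autodoc
# can import the gopca package even in build environments (e.g. documentation
# hosts) where these packages are not installed.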
sys.modules.update((text(mod_name), Mock()) for mod_name in MOCK_MODULES)
#print('Path:', sys.path)
#print([os.path.isdir(d) for d in sys.path])
import gopca
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.realpath(os.path.abspath('../../cli')))
#sys.path.insert(0, os.path.realpath(os.path.abspath('../../plotting')))
#import scripts.extract_signature_matrix
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxarg.ext',
]
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GO-PCA'
copyright = u'2015, 2016 Florian Wagner'
author = u'Florian Wagner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = text(
'.'.join(
[str(n) for n in pkg_resources.parse_version(gopca.__version__)
._version.release[:2]]))
# The full version, including alpha/beta/rc tags.
release = gopca.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
#html_theme = 'classic'
#html_theme = 'sphinx_rtd_theme'
html_theme = 'bootstrap'
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
# source:
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "GO-PCA",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "GO-PCA Documentation",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "nav",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "simplex",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GO-PCAdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GO-PCA.tex', u'GO-PCA Documentation',
u'Florian Wagner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'go-pca', u'GO-PCA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GO-PCA', u'GO-PCA Documentation',
author, 'GO-PCA', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'xlmhg': ('https://xl-mhg.readthedocs.io/en/latest/', None),
'genometools': ('https://genometools.readthedocs.io/en/latest/', None),
}
| gpl-3.0 |
treesnail/tushare | tushare/datayes/trading.py | 14 | 4741 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on July 4, 2015
@author: JimmyLiu
@QQ:52799046
"""
from tushare.datayes import vars as vs
import pandas as pd
from pandas.compat import StringIO
class Trading():
def __init__(self, client):
self.client = client
def dy_market_tickRT(self, securityID='000001.XSHG,000001.XSHE', field=vs.TICK_RT_DEFAULT_COLS):
"""
        Get the latest market snapshot.
        Retrieves the latest Level-1 quote data for one or more securities.
        Pass one or more security codes, e.g. 000001.XSHG (SSE Composite Index)
        or 000001.XSHE (Ping An Bank), plus the desired fields, to obtain the
        latest trading snapshot. Securities can be stocks, indices, some bonds
        or funds.
        getTickRTSnapshot
"""
code, result = self.client.getData(vs.TICK_RT%(securityID, field))
return _ret_data(code, result)
def dy_market_tickRtIndex(self, securityID='', field=''):
"""
        Get the latest market snapshot for the constituents of an index.
        Retrieves the latest Level-1 quote data for an index's constituent stocks.
        Pass an index code, e.g. 000001.XSHG (SSE Composite Index) or
        000300.XSHG (CSI 300), plus the desired fields, to obtain the latest
        trading snapshot of the constituents.
        getTickRTSnapshotIndex
"""
code, result = self.client.getData(vs.TICK_RT_INDEX%(securityID, field))
return _ret_data(code, result)
def dy_market_industry_rt(self, securityID='', field=''):
"""
        Get capital flows by industry (CSRC industry classification).
        Includes the traded amounts of small, medium, large and extra-large
        orders, as well as the total traded amount of the current orders.
        getIndustryTickRTSnapshot
"""
code, result = self.client.getData(vs.INDUSTRY_TICK_RT%(securityID, field))
return _ret_data(code, result)
def dy_market_future_rt(self, instrumentID='', field=''):
"""
        Get the latest market snapshot for one or more futures contracts.
        getFutureTickRTSnapshot
"""
code, result = self.client.getData(vs.FUTURE_TICK_RT%(instrumentID, field))
return _ret_data(code, result)
def dy_market_equ_rtrank(self, exchangeCD='', pagesize='',
pagenum='', desc='', field=''):
"""
        Get the price-change ranking of Shanghai/Shenzhen stocks.
        getEquRTRank
"""
code, result = self.client.getData(vs.EQU_RT_RANK%(exchangeCD, pagesize,
pagenum, desc, field))
return _ret_data(code, result)
def dy_market_option_rt(self, optionId='', field=''):
"""
        Get the latest market snapshot for options.
        getOptionTickRTSnapshot
"""
code, result = self.client.getData(vs.OPTION_RT%(optionId, field))
return _ret_data(code, result)
def dy_market_sectips(self, tipsTypeCD='H', field=''):
"""
        Today's list of suspended and resumed stocks on the Shanghai and
        Shenzhen stock exchanges. Data update frequency: daily.
        getSecTips
"""
code, result = self.client.getData(vs.SEC_TIPS%(tipsTypeCD, field))
return _ret_data(code, result)
def dy_market_tickrt_intraday(self, securityID='000001.XSHE', startTime='',
endTime='', field=''):
"""
        Get intraday Level-1 data for a stock, index, bond or fund over a time
        window of the current trading day.
        Corresponds to: getTickRTIntraDay
"""
code, result = self.client.getData(vs.TICK_RT_INTRADAY%(securityID, startTime,
endTime, field))
return _ret_data(code, result)
def dy_market_bar_rt(self, securityID='000001.XSHE', startTime='',
endTime='', unit='1', field=''):
"""
        Get the current day's minute bars for a security.
        Pass a security code, e.g. 000001.XSHE (Ping An Bank), to obtain its
        minute bars for the day. Supported securities are currently stocks,
        indices, funds and some bonds.
        Valid minute-bar data covers 09:30-11:30 in the morning and
        13:01-15:00 in the afternoon.
        Corresponds to: getBarRTIntraDay
"""
code, result = self.client.getData(vs.TICK_RT_INTRADAY%(securityID, startTime,
endTime, field))
return _ret_data(code, result)
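    # Illustrative usage sketch -- not part of the original module; it assumes
    # a valid DataYes API client object named `client`:
    #   trd = Trading(client)
    #   df = trd.dy_market_tickRT(securityID='000001.XSHG,000001.XSHE')
    #   if df is not None: print(df.head())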
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If k is not set then all components are stored and the sum of explained
        variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
        (arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
ndingwall/scikit-learn | examples/ensemble/plot_gradient_boosting_early_stopping.py | 17 | 4919 | """
===================================
Early stopping of Gradient Boosting
===================================
Gradient boosting is an ensembling technique where several weak learners
(regression trees) are combined to yield a powerful single model, in an
iterative fashion.
Early stopping support in Gradient Boosting enables us to find the least number
of iterations which is sufficient to build a model that generalizes well to
unseen data.
The concept of early stopping is simple. We specify a ``validation_fraction``
which denotes the fraction of the whole dataset that will be kept aside from
training to assess the validation loss of the model. The gradient boosting
model is trained using the training set and evaluated using the validation set.
When each additional stage of regression tree is added, the validation set is
used to score the model. This is continued until the scores of the model in
the last ``n_iter_no_change`` stages do not improve by at least ``tol``. After
that the model is considered to have converged and further addition of stages
is "stopped early".
The number of stages of the final model is available at the attribute
``n_estimators_``.
This example illustrates how the early stopping can used in the
:class:`~sklearn.ensemble.GradientBoostingClassifier` model to achieve
almost the same accuracy as a model built without early stopping, while using
many fewer estimators. This can significantly reduce training time,
memory usage and prediction latency.
"""
# Authors: Vighnesh Birodkar <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.model_selection import train_test_split
print(__doc__)
data_list = [datasets.load_iris(), datasets.load_digits()]
data_list = [(d.data, d.target) for d in data_list]
data_list += [datasets.make_hastie_10_2()]
names = ['Iris Data', 'Digits Data', 'Hastie Data']
n_gb = []
score_gb = []
time_gb = []
n_gbes = []
score_gbes = []
time_gbes = []
n_estimators = 500
for X, y in data_list:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
    # We specify that if the scores don't improve by at least 0.01 for the last
    # 5 stages, stop fitting additional stages
gbes = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
validation_fraction=0.2,
n_iter_no_change=5, tol=0.01,
random_state=0)
gb = ensemble.GradientBoostingClassifier(n_estimators=n_estimators,
random_state=0)
start = time.time()
gb.fit(X_train, y_train)
time_gb.append(time.time() - start)
start = time.time()
gbes.fit(X_train, y_train)
time_gbes.append(time.time() - start)
score_gb.append(gb.score(X_test, y_test))
score_gbes.append(gbes.score(X_test, y_test))
n_gb.append(gb.n_estimators_)
n_gbes.append(gbes.n_estimators_)
bar_width = 0.2
n = len(data_list)
index = np.arange(0, n * bar_width, bar_width) * 2.5
index = index[0:n]
# %%
# Compare scores with and without early stopping
# ----------------------------------------------
plt.figure(figsize=(9, 5))
bar1 = plt.bar(index, score_gb, bar_width, label='Without early stopping',
color='crimson')
bar2 = plt.bar(index + bar_width, score_gbes, bar_width,
label='With early stopping', color='coral')
plt.xticks(index + bar_width, names)
plt.yticks(np.arange(0, 1.3, 0.1))
def autolabel(rects, n_estimators):
"""
Attach a text label above each bar displaying n_estimators of each model
"""
for i, rect in enumerate(rects):
plt.text(rect.get_x() + rect.get_width() / 2.,
1.05 * rect.get_height(), 'n_est=%d' % n_estimators[i],
ha='center', va='bottom')
autolabel(bar1, n_gb)
autolabel(bar2, n_gbes)
plt.ylim([0, 1.3])
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Datasets')
plt.ylabel('Test score')
plt.show()
# %%
# Compare fit times with and without early stopping
# -------------------------------------------------
plt.figure(figsize=(9, 5))
bar1 = plt.bar(index, time_gb, bar_width, label='Without early stopping',
color='crimson')
bar2 = plt.bar(index + bar_width, time_gbes, bar_width,
label='With early stopping', color='coral')
max_y = np.amax(np.maximum(time_gb, time_gbes))
plt.xticks(index + bar_width, names)
plt.yticks(np.linspace(0, 1.3 * max_y, 13))
autolabel(bar1, n_gb)
autolabel(bar2, n_gbes)
plt.ylim([0, 1.3 * max_y])
plt.legend(loc='best')
plt.grid(True)
plt.xlabel('Datasets')
plt.ylabel('Fit Time')
plt.show()
| bsd-3-clause |
dpace1/caterpillar-tube-pricing | utils_xgb.py | 1 | 3698 | from utils import *
from hyperopt import hp
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import xgboost as xgb
from sklearn import cross_validation
def split_formatter(folds, train, labels, missing = 0.0):
"""Returns a list of train-val tuples for a given folds iterator"""
out = []
for ind_train, ind_val in folds:
out += [(xgb.DMatrix(train[ind_train], label = labels[ind_train],
missing = missing),
xgb.DMatrix(train[ind_val], label = labels[ind_val],
missing = missing))]
return out
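# Example usage (a sketch with hypothetical data objects; any iterator of
# (train_idx, val_idx) index pairs works, e.g. KFold from the pre-0.18
# ``sklearn.cross_validation`` module imported above):
#
#     folds = cross_validation.KFold(len(labels), n_folds=5, shuffle=True)
#     splits = split_formatter(folds, train, labels)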
def hyperopt_xgb(params, train, test, labels, folds, missing = 0.0, refit = True):
# Obtaining CV folds
splits = split_formatter(folds, train, labels, missing = missing)
bst_params = params['bst_params']
other_params = params['other_params']
out_dict = defaultdict(list)
out_dict['model'] = 'xgb'
# Training on each fold and saving results
for tr, val in splits:
evallist = [(tr, 'tr'), (val, 'val')]
evals_result = {}
bst = xgb.train(bst_params,
tr,
num_boost_round = other_params['num_rounds'],
evals = evallist,
early_stopping_rounds = other_params['early_stopping_rounds'],
evals_result = evals_result)
preds = bst.predict(val)
evals_result['tr'] = np.asarray(map(float, evals_result['tr']))
evals_result['val'] = np.asarray(map(float, evals_result['val']))
out_dict['best_iteration'].extend([bst.best_iteration])
out_dict['loss_val'].extend([bst.best_score])
out_dict['oob_predictions'].extend([preds])
out_dict['evals_result'].append(evals_result)
## Adding other objects to the output dictionary
# Sorting predictions by validation id
val_inds = np.hstack([tup[1] for tup in list(folds)])
oob_preds = np.hstack([preds for preds in out_dict['oob_predictions']])
tmp = zip(oob_preds, val_inds)
tmp.sort(key = lambda tup: tup[1])
out_dict['oob_predictions_cat'] = np.asarray([tup[0] for tup in tmp])
out_dict['loss'] = np.mean(out_dict['loss_val'])
out_dict['loss_val_sd'] = np.var(out_dict['loss_val'])
# Training on full training set and saving test predictions
if refit:
xgtrain = xgb.DMatrix(train, label = labels)
xgtest = xgb.DMatrix(test)
# n_rounds_final = out_dict['best_iteration']
bst = xgb.train(bst_params, xgtrain, num_boost_round = \
other_params['num_rounds'])
test_preds = bst.predict(xgtest)
out_dict['test_predictions'] = test_preds
out_dict['f_score'] = bst.get_fscore()
out_dict['status'] = STATUS_OK
# Converting to a normal dict
out_dict = dict(out_dict)
print "Val loss (SD): ", str(out_dict['loss']) + " (" + \
str(out_dict['loss_val_sd']) + ")"
return out_dict
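# Example hyperopt driver (sketch only; ``search_space`` is a hypothetical space
# built with ``hp``, and ``train``/``test``/``labels``/``folds`` are assumed to exist):
#
#     trials = Trials()
#     objective = lambda p: hyperopt_xgb(p, train, test, labels, folds)
#     best = fmin(objective, space=search_space, algo=tpe.suggest,
#                 max_evals=50, trials=trials)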
### TO BE REFACTORED...
# def score_xgb(params_new, train, test, labels, folds, missing = 0.0, refit = True):
# params = {}
# bst_params = {}
# bst_params["seed"] = np.random.randint(1000000)
# bst_params["objective"] = params_new["objective"]
# bst_params["eta"] = params_new["eta"]
# bst_params["gamma"] = params_new["gamma"]
# bst_params["nthread"] = params_new["nthread"]
# bst_params["min_child_weight"] = params_new["min_child_weight"]
# bst_params["subsample"] = params_new["subsample"]
# bst_params["colsample_bytree"] = params_new["colsample_bytree"]
# bst_params["silent"] = params_new["silent"]
# bst_params["max_depth"] = params_new["max_depth"]
# params['bst_params'] = bst_params
# other_params = {}
# other_params['num_rounds'] = params_new['num_rounds']
# other_params['early_stopping_rounds'] = params_new['early_stopping_rounds']
# params['other_params'] = other_params
# return hyperopt_xgb(params, train = train, test = test, labels = labels,
# folds = folds, missing = missing, refit = refit) | mit |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/cluster/birch.py | 1 | 22756 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
from math import sqrt
import numpy as np
from scipy import sparse
from ..externals.six.moves import xrange
from .hierarchical import AgglomerativeClustering
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..exceptions import NotFittedError
from ..metrics.pairwise import euclidean_distances
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
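    # Standard CSR layout: for row i, X_data[X_indptr[i]:X_indptr[i + 1]] holds the
    # nonzero values and X_indices[X_indptr[i]:X_indptr[i + 1]] their column indices.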
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True, in order to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norms as views. In this way, it is
        # sufficient to update init_centroids_ and init_sq_norm_; the views
        # reflect the changes automatically.
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
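        # ||x - c||^2 = ||x||^2 - 2 <x, c> + ||c||^2; since ||x||^2 is the same
        # for every candidate centroid, argmin of (-2 <x, c> + ||c||^2) suffices.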
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child Node of the subcluster. Once a given _CFNode is set as the child
        of a _CFSubcluster, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
    ends up at the subcluster of the leaf of the tree that has the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceed the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels: ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
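        # Same trick as in _CFNode.insert_cf_subcluster: ||x||^2 is constant per
        # row, so taking argmin of (-2 X C^T + ||C||^2) is equivalent to taking
        # argmin of the full squared distance to each subcluster center.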
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| mit |
bert9bert/statsmodels | statsmodels/sandbox/distributions/examples/ex_mvelliptical.py | 34 | 5169 | # -*- coding: utf-8 -*-
"""examples for multivariate normal and t distributions
Created on Fri Jun 03 16:00:26 2011
@author: josef
for comparison I used R mvtnorm version 0.9-96
"""
from __future__ import print_function
import numpy as np
import statsmodels.sandbox.distributions.mv_normal as mvd
from numpy.testing import assert_array_almost_equal
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
#************** multivariate normal distribution ***************
mvn3 = mvd.MVNormal(mu, cov3)
#compare with random sample
x = mvn3.rvs(size=1000000)
xli = [[2., 1., 1.5],
[0., 2., 1.5],
[1.5, 1., 2.5],
[0., 1., 1.5]]
xliarr = np.asarray(xli).T[None,:, :]
#from R session
#pmvnorm(lower=-Inf,upper=(x[0,.]-mu)/sqrt(diag(cov3)),mean=rep(0,3),corr3)
r_cdf = [0.3222292, 0.3414643, 0.5450594, 0.3116296]
r_cdf_errors = [1.715116e-05, 1.590284e-05, 5.356471e-05, 3.567548e-05]
n_cdf = [mvn3.cdf(a) for a in xli]
assert_array_almost_equal(r_cdf, n_cdf, decimal=4)
print(n_cdf)
print('')
print((x<np.array(xli[0])).all(-1).mean(0))
print((x[...,None]<xliarr).all(1).mean(0))
print(mvn3.expect_mc(lambda x: (x<xli[0]).all(-1), size=100000))
print(mvn3.expect_mc(lambda x: (x[...,None]<xliarr).all(1), size=100000))
#other methods
mvn3n = mvn3.normalized()
assert_array_almost_equal(mvn3n.cov, mvn3n.corr, decimal=15)
assert_array_almost_equal(mvn3n.mean, np.zeros(3), decimal=15)
xn = mvn3.normalize(x)
xn_cov = np.cov(xn, rowvar=0)
assert_array_almost_equal(mvn3n.cov, xn_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xn.mean(0), decimal=2)
mvn3n2 = mvn3.normalized2()
assert_array_almost_equal(mvn3n.cov, mvn3n2.cov, decimal=2)
#mistake: "normalized2" standardizes - FIXED
#assert_array_almost_equal(np.eye(3), mvn3n2.cov, decimal=2)
xs = mvn3.standardize(x)
xs_cov = np.cov(xn, rowvar=0)
#another mixup xs is normalized
#assert_array_almost_equal(np.eye(3), xs_cov, decimal=2)
assert_array_almost_equal(mvn3.corr, xs_cov, decimal=2)
assert_array_almost_equal(np.zeros(3), xs.mean(0), decimal=2)
mv2m = mvn3.marginal(np.array([0,1]))
print(mv2m.mean)
print(mv2m.cov)
mv2c = mvn3.conditional(np.array([0,1]), [0])
print(mv2c.mean)
print(mv2c.cov)
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
print(mv2c.cov)
import statsmodels.api as sm
mod = sm.OLS(x[:,0], sm.add_constant(x[:,1:], prepend=True))
res = mod.fit()
print(res.model.predict(np.array([1,0,0])))
mv2c = mvn3.conditional(np.array([0]), [0, 0])
print(mv2c.mean)
mv2c = mvn3.conditional(np.array([0]), [1, 1])
print(res.model.predict(np.array([1,1,1])))
print(mv2c.mean)
#the following wrong input doesn't raise an exception but produces wrong numbers
#mv2c = mvn3.conditional(np.array([0]), [[1, 1],[2,2]])
#************** multivariate t distribution ***************
mvt3 = mvd.MVT(mu, cov3, 4)
xt = mvt3.rvs(size=100000)
assert_array_almost_equal(mvt3.cov, np.cov(xt, rowvar=0), decimal=1)
mvt3s = mvt3.standardized()
mvt3n = mvt3.normalized()
#the following should be equal or correct up to numerical precision of float
assert_array_almost_equal(mvt3.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(mvt3n.corr, mvt3n.sigma, decimal=15)
assert_array_almost_equal(np.eye(3), mvt3s.sigma, decimal=15)
xts = mvt3.standardize(xt)
xts_cov = np.cov(xts, rowvar=0)
xtn = mvt3.normalize(xt)
xtn_cov = np.cov(xtn, rowvar=0)
xtn_corr = np.corrcoef(xtn, rowvar=0)
assert_array_almost_equal(mvt3n.mean, xtn.mean(0), decimal=2)
#the following might fail sometimes (random test), add seed in tests
assert_array_almost_equal(mvt3n.corr, xtn_corr, decimal=1)
#watch out cov is not the same as sigma for t distribution, what's right here?
#normalize by sigma or by cov ? now normalized by sigma
assert_array_almost_equal(mvt3n.cov, xtn_cov, decimal=1)
assert_array_almost_equal(mvt3s.cov, xts_cov, decimal=1)
a = [0.0, 1.0, 1.5]
mvt3_cdf0 = mvt3.cdf(a)
print(mvt3_cdf0)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.3026741) # "error": 0.0004832187
print('R', 0.3026855) # error 3.444375e-06 with smaller abseps
print('diff', mvt3_cdf0 - 0.3026855)
a = [0.0, 0.5, 1.0]
mvt3_cdf1 = mvt3.cdf(a)
print(mvt3_cdf1)
print((xt<np.array(a)).all(-1).mean(0))
print('R', 0.1946621) # "error": 0.0002524817)
print('R', 0.1946217) # "error:"2.748699e-06 with smaller abseps)
print('diff', mvt3_cdf1 - 0.1946217)
assert_array_almost_equal(mvt3_cdf0, 0.3026855, decimal=5)
assert_array_almost_equal(mvt3_cdf1, 0.1946217, decimal=5)
import statsmodels.distributions.mixture_rvs as mix
mu2 = np.array([4, 2.0, 2.0])
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
md = mix.mv_mixture_rvs([0.4, 0.6], 5, [mvt3, mvt3n], 3)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
#rvs2 = rvs[:,:2]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(2, 2, 1)
plt.plot(rvs[:,0], rvs[:,1], '.', alpha=0.25)
plt.title('1 versus 0')
fig.add_subplot(2, 2, 2)
plt.plot(rvs[:,0], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 0')
fig.add_subplot(2, 2, 3)
plt.plot(rvs[:,1], rvs[:,2], '.', alpha=0.25)
plt.title('2 versus 1')
#plt.show()
| bsd-3-clause |
c-wilson/klustaviewa | klustaviewa/views/tests/test_waveformview.py | 2 | 1385 | """Unit tests for waveform view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from klustaviewa.views.tests.mock_data import (setup, teardown,
nspikes, nclusters, nsamples, nchannels, fetdim)
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import WaveformView
from klustaviewa.views.tests.utils import show_view, get_data
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_waveformview():
keys = ('waveforms,clusters,cluster_colors,clusters_selected,masks,'
'geometrical_positions'
).split(',')
data = get_data()
kwargs = {k: data[k] for k in keys}
operators = [
lambda self: self.view.toggle_mask(),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
# Show the view.
show_view(WaveformView, operators=operators, **kwargs)
| bsd-3-clause |
GkAntonius/feynman | examples/Solid_State_Physics/plot_multi.py | 2 | 1868 | """
Electron-phonon self-energy
===========================
Example for multiple independent diagrams.
"""
from feynman import Diagram
import matplotlib.pyplot as plt
def main():
fig, axes = plt.subplots(figsize=(6,3), nrows=1, ncols=2,
subplot_kw=dict(aspect='equal', frameon=True),
sharex=True, sharey=True,
)
G_style = dict(style='single', arrow=True, arrow_param={'width':0.05, 'length': .15})
Ph_style = dict(style='elliptic loopy', ellipse_spread=.55, xamp=0.035, yamp=-0.05, nloops=13)
DW_style = dict(style='circular loopy', circle_radius=.25, xamp=.04, yamp=.05, nloops=15)
V_style = dict()
get_diagram_one(axes[0], G_style, Ph_style, V_style)
get_diagram_two(axes[1], G_style, DW_style, V_style)
plt.tight_layout()
plt.show()
def get_diagram_one(ax, fermion_style, boson_style, vertex_style):
D = Diagram(ax)
w = 0.75
xy0 = [0.5 - w/2, 0.25]
v1 = D.vertex(xy0, **vertex_style)
v2 = D.vertex(v1.xy, dx=w, **vertex_style)
G = D.line(v1, v2, **fermion_style)
B = D.line(v1, v2, **boson_style)
# In case the axes get smaller (you have more diagrams), you might want to change the scale
D.scale(1.0)
D.plot()
return D
def get_diagram_two(ax, fermion_style, boson_style, vertex_style):
D = Diagram(ax)
w = 0.75
xy0 = [0.5 - w/2, 0.25]
v1 = D.vertex(xy0, **vertex_style)
v2 = D.vertex(v1.xy, dx=w/2, **vertex_style)
v3 = D.vertex(v1.xy, dx=w, **vertex_style)
G1 = D.line(v1, v2, **fermion_style)
G2 = D.line(v2, v3, **fermion_style)
B = D.line(v2, v2, **boson_style)
# In case the axes get smaller (you have more diagrams), you might want to change the scale
D.scale(1.0)
D.plot()
return D
if __name__ == '__main__':
main()
| gpl-3.0 |
gustfrontar/LETKF_WRF | scale_breeding/python/plot_bvamp_timeseries.py | 1 | 6686 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 1 18:45:15 2016
@author:
"""
# LECTURA Y GRAFICADO RADAR (Formato binario GVAR-SMN)
import numpy as np
import matplotlib as plt
import datetime as dt
import binary_io as bio
import bred_vector_functions as bvf
import os
basedir='/home/jruiz/share/exp/'
expname = '/breeding_osaka_pawr_1km_bip5_local_1000m_UVT/'
plotbasedir=basedir + expname + '/plots/'
inibv=1 #Initial bred vector to plot.
endbv=1 #Final bred vector to plot.
niter=5 #End iter to plot.
nbv=endbv-inibv+1 #Total number of bred vectors.
undef_out=np.nan #This is the undef value that we will use internally.
undef_in=1.0e20 #This is the undef value in the original data (or at least a big number that is lower than the real undef value).
#Define regions
lati=np.array([34.75,34.6])
late=np.array([35.25,34.9])
loni=np.array([135.5,135.4])
lone=np.array([136.25,135.7])
reg_name='REG_1','REG_2','TOTAL'
plotlevels=np.array([6,13,17]) #Which levels will be plotted.
plotvars='UV','W','T','QV','QHYD' #Which variables will be plotted.
smooth_type='None'
smooth_sigma=np.array([1.5])
#Create the plotbasedir
if not os.path.exists(plotbasedir):
os.mkdir(plotbasedir)
#Define initial and end times using the datetime module.
itime = dt.datetime(2013,7,13,5,10,30) #Initial time.
etime = dt.datetime(2013,7,13,5,40,00) #End time.
#Define the delta.
delta=dt.timedelta(seconds=30)
ctime = itime + delta
ntimes=1 + np.around((etime-itime).seconds / delta.seconds)
nx=180
ny=180
nz=20
data_pp_o=dict()
data_pn_o=dict()
data_pp_r=dict()
data_pn_r=dict()
bv_o=dict()
bv_r=dict()
#Get lat lon.
lat=bio.read_data_direct(basedir + expname + '/latlon/lat_d01z001.grd',nx,ny,1,'>f4')[:,:,0]
lon=bio.read_data_direct(basedir + expname + '/latlon/lon_d01z001.grd',nx,ny,1,'>f4')[:,:,0]
time_mean_growth_rate=np.zeros([nx,ny,nz])
time_sprd_growth_rate=np.zeros([nx,ny,nz])
time_mean_norm=np.zeros([nx,ny,nz])
time_sprd_norm=np.zeros([nx,ny,nz])
#Convert lat lon to the nearest grid point.
#Add the global domain as a region.
lati=np.append(lati,lat[0,0])
late=np.append(late,lat[nx-1,ny-1])
loni=np.append(loni,lon[0,0])
lone=np.append(lone,lon[nx-1,ny-1])
xi , yi = bvf.lat_lon_to_i_j(lon,lat,loni,lati)
xe , ye = bvf.lat_lon_to_i_j(lon,lat,lone,late)
nregs=xi.shape[0]
norm_mean_o=dict()
norm_max_o=dict()
norm_min_o=dict()
norm_mean_r=dict()
norm_max_r=dict()
norm_min_r=dict()
norm_mean_i=dict()
norm_max_i=dict()
norm_min_i=dict()
gr_bv_mean=dict()
gr_bv_min=dict()
gr_bv_max=dict()
#Allocate memory for the dictionaries.
for myvar in plotvars :
norm_mean_o[myvar]=np.zeros([ntimes,nbv,nregs])
norm_max_o[myvar]=np.zeros([ntimes,nbv,nregs])
norm_min_o[myvar]=np.zeros([ntimes,nbv,nregs])
norm_mean_r[myvar]=np.zeros([ntimes,nbv,nregs])
norm_max_r[myvar]=np.zeros([ntimes,nbv,nregs])
norm_min_r[myvar]=np.zeros([ntimes,nbv,nregs])
norm_mean_i[myvar]=np.zeros([ntimes,nbv,nregs])
norm_max_i[myvar]=np.zeros([ntimes,nbv,nregs])
norm_min_i[myvar]=np.zeros([ntimes,nbv,nregs])
gr_bv_mean[myvar]=np.zeros([ntimes,nbv,nregs])
gr_bv_min[myvar]=np.zeros([ntimes,nbv,nregs])
gr_bv_max[myvar]=np.zeros([ntimes,nbv,nregs])
for ibv in range (inibv , endbv + 1):
bvstr="%04d" % ibv
#print( ' Plotting bred vector number ' + bvstr )
while ( ctime <= etime ):
it = ( ctime - itime ).seconds / delta.seconds
iterstr="%04d" % niter
      ptime=ctime - delta #Data corresponding to the previous step (to compute bv growth)
print ( 'The date is :', ctime )
print ( 'Reading the positive perturbation original')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pp_o' + iterstr + '/'
data_pp_o=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
print ( 'Reading the negative perturbation original')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pn_o' + iterstr + '/'
data_pn_o=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
print ( 'Reading the positive perturbation rescaled')
mydir=basedir + expname + ptime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pp_r' + iterstr + '/'
data_pp_i=bio.read_data_scale(mydir,expname,ptime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
print ( 'Reading the negative perturbation rescaled')
mydir=basedir + expname + ptime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pn_r' + iterstr + '/'
data_pn_i=bio.read_data_scale(mydir,expname,ptime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
      iterstr="%04d" % 1 #Read the rescaling of perturbation 1 at time t, which is the rescaling of the perturbation from the last iteration at time t.
print ( 'Reading the positive perturbation rescaled')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pp_r' + iterstr + '/'
data_pp_r=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
print ( 'Reading the negative perturbation rescaled')
mydir=basedir + expname + ctime.strftime("%Y%m%d%H%M%S") + '/' + bvstr + '/' + '/grads_pn_r' + iterstr + '/'
data_pn_r=bio.read_data_scale(mydir,expname,ctime,nx,ny,nz,undef_in=undef_in,undef_out=undef_out)
#print(mydir)
#bv_o=bvf.data_diff( data_pp_o , data_pn_o )
#bv_r=bvf.data_diff( data_pp_o , data_pn_o )
for my_var in plotvars :
norm_mean_o[my_var][it,ibv-1,:],norm_max_o[my_var][it,ibv-1,:],norm_min_o[my_var][it,ibv-1,:],norm_o=bvf.norm_bv( data_pp_o , data_pn_o , norm_type=my_var , smooth=smooth_type , sigma=smooth_sigma , xi=xi , yi=yi , xe=xe , ye=ye )
norm_mean_r[my_var][it,ibv-1,:],norm_max_r[my_var][it,ibv-1,:],norm_min_r[my_var][it,ibv-1,:],norm_r=bvf.norm_bv( data_pp_r , data_pn_r , norm_type=my_var , smooth=smooth_type , sigma=smooth_sigma , xi=xi , yi=yi , xe=xe , ye=ye )
norm_mean_i[my_var][it-1,ibv-1,:],norm_max_i[my_var][it-1,ibv-1,:],norm_min_i[my_var][it-1,ibv-1,:],norm_r=bvf.norm_bv( data_pp_i , data_pn_i , norm_type=my_var , smooth=smooth_type , sigma=smooth_sigma , xi=xi , yi=yi , xe=xe , ye=ye )
ctime = ctime + delta
ntimes = ntimes + 1
print ( "Finish time loop" )
mybv=0
mydir=plotbasedir + '/time_independent_plots/' + '/' + bvstr + '/'
#Plot norm time series.
bvf.plot_norm_timeseries(norm_mean_o,norm_mean_i,norm_mean_r,plotvars,reg_name,mydir,mybv,'norm_mean',figsize='None')
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/user_interfaces/embedding_in_qt4_wtoolbar.py | 6 | 2033 | from __future__ import print_function
import sys
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backend_bases import key_press_handler
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QTAgg as NavigationToolbar)
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
#self.x, self.y = self.get_data()
self.data = self.get_data2()
self.create_main_frame()
self.on_draw()
def create_main_frame(self):
self.main_frame = QWidget()
self.fig = Figure((5.0, 4.0), dpi=100)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.canvas.setFocusPolicy(Qt.StrongFocus)
self.canvas.setFocus()
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.canvas.mpl_connect('key_press_event', self.on_key_press)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas) # the matplotlib canvas
vbox.addWidget(self.mpl_toolbar)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def get_data2(self):
return np.arange(20).reshape([4, 5]).copy()
def on_draw(self):
self.fig.clear()
self.axes = self.fig.add_subplot(111)
#self.axes.plot(self.x, self.y, 'ro')
self.axes.imshow(self.data, interpolation='nearest')
#self.axes.plot([1,2,3])
self.canvas.draw()
def on_key_press(self, event):
print('you pressed', event.key)
# implement the default mpl key press events described at
# http://matplotlib.org/users/navigation_toolbar.html#navigation-keyboard-shortcuts
key_press_handler(event, self.canvas, self.mpl_toolbar)
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| mit |
CartoDB/cartoframes | cartoframes/data/observatory/catalog/catalog.py | 1 | 11865 | from .entity import is_slug_value
from .country import Country
from .category import Category
from .provider import Provider
from .dataset import Dataset
from .geography import Geography
from .subscriptions import Subscriptions
from .repository.constants import (COUNTRY_FILTER, CATEGORY_FILTER, GEOGRAPHY_FILTER, GLOBAL_COUNTRY_FILTER,
PROVIDER_FILTER, PUBLIC_FILTER)
from ....utils.logger import log
from ....utils.utils import get_credentials
class Catalog:
"""This class represents the Data Observatory metadata
:py:class:`Catalog <cartoframes.data.observatory.Catalog>`.
The catalog contains metadata that helps to discover and understand the
data available in the Data Observatory for :py:attr:`Dataset.download` and :obj:`Enrichment` purposes.
You can get more information about the Data Observatory catalog from the
`CARTO website <https://carto.com/platform/location-data-streams/>`__ and in your CARTO user account dashboard.
The Catalog has three main purposes:
- Explore and discover the datasets available in the repository (both public and premium datasets).
- Subscribe to some premium datasets and manage your datasets licenses.
- Download data and use your licensed datasets and variables to enrich your own data by means of the
:obj:`Enrichment` functions.
The Catalog is public and can be explored without a CARTO account. Once you discover a
:obj:`Dataset` of interest and want to acquire a license to use it, you'll need a CARTO account to
subscribe to it, by means of the :py:attr:`Dataset.subscribe` or :py:attr:`Geography.subscribe` functions.
The Catalog is composed of three main entities:
- :obj:`Dataset`: It is the main :obj:`CatalogEntity`. It contains metadata of the actual data
you can use to :py:attr:`Dataset.download` or for :obj:`Enrichment` purposes.
- :obj:`Geography`: Datasets in the Data Observatory are aggregated by different geographic boundaries.
The `Geography` entity contains metadata to understand the boundaries of a :obj:`Dataset`. It's used for
enrichment and you can also :py:attr:`Geography.download` the underlying data.
- :obj:`Variable`: Variables contain metadata about the columns available in each dataset for enrichment.
Let's say you explore a `dataset` with demographic data for the whole US at the Census tract level.
The variables give you information about the actual columns you have available, such as: total_population,
total_males, etc.
On the other hand, you can use lists of `Variable` instances, :py:attr:`Variable.id`, or
:py:attr:`Variable.slug` to enrich your own data.
    Every `Dataset` is related to a `Geography`. You can have, for example, demographics data at the Census
    tract, block group, or block level.
When subscribing to a premium dataset, you should subscribe to both the :py:attr:`Dataset.subscribe` and the
:py:attr:`Geography.subscribe` to be able to access both tables to enrich your own data.
The two main entities of the Catalog (`Dataset` and `Geography`) are related to other entities, that
are useful for a hierarchical categorization and discovery of available data in the Data Observatory:
- :obj:`Category`: Groups datasets of the same topic, for example, `demographics`, `financial`, etc.
- :obj:`Country`: Groups datasets available by country
- :obj:`Provider`: Gives you information about the provider of the source data
    You can just list all the grouping entities. Note that this is not the preferred way
    to discover the catalog metadata, since there can be thousands of entities in it:
>>> Category.get_all()
[<Category.get('demographics')>, ...]
>>> Country.get_all()
[<Country.get('usa')>, ...]
>>> Provider.get_all()
[<Provider.get('mrli')>, ...]
Or you can get them by ID:
>>> Category.get('demographics')
<Category.get('demographics')>
>>> Country.get('usa')
<Country.get('usa')>
>>> Provider.get('mrli')
<Provider.get('mrli')>
Examples:
        The preferred way to discover the available datasets in the Catalog is through nested filters
>>> catalog = Catalog()
>>> catalog.country('usa').category('demographics').datasets
[<Dataset.get('acs_sociodemogr_b758e778')>, ...]
You can include the geography as part of the nested filter like this:
>>> catalog = Catalog()
>>> catalog.country('usa').category('demographics').geography('ags_blockgroup_1c63771c').datasets
If a filter is already applied to a Catalog instance and you want to do a new hierarchical search,
clear the previous filters with the `Catalog().clear_filters()` method:
>>> catalog = Catalog()
>>> catalog.country('usa').category('demographics').geography('ags_blockgroup_1c63771c').datasets
>>> catalog.clear_filters()
>>> catalog.country('esp').category('demographics').datasets
Otherwise the filters accumulate and you'll get unexpected results.
        During the discovery process, it's useful to understand the metadata related to a given Geography or Dataset.
        A useful way of reading or filtering by metadata values is to convert the entities to a pandas
DataFrame:
>>> catalog = Catalog()
>>> catalog.country('usa').category('demographics').geography('ags_blockgroup_1c63771c').datasets.to_dataframe()
For each dataset in the Catalog, you can explore its variables, get a summary of its stats, etc.
>>> dataset = Dataset.get('od_acs_13345497')
>>> dataset.variables
[<Variable.get('dwellings_2_uni_fb8f6cfb')> #'Two-family (two unit) dwellings', ...]
See the Catalog guides and examples in our
`public documentation website <https://carto.com/developers/cartoframes/guides/Introduction/>`__
for more information.
"""
def __init__(self):
self.filters = {}
@property
def countries(self):
"""Get all the countries with datasets available in the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no countries are found.
"""
return Country.get_all(self.filters)
@property
def categories(self):
"""Get all the categories in the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no categories are found.
"""
self._global_message()
return Category.get_all(self.filters)
@property
def providers(self):
"""Get all the providers in the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no providers are found.
"""
self._global_message()
return Provider.get_all(self.filters)
@property
def datasets(self):
"""Get all the datasets in the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no datasets are found.
"""
self._global_message()
return Dataset.get_all(self.filters)
@property
def geographies(self):
"""Get all the geographies in the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no geographies are found.
"""
self._global_message()
return Geography.get_all(self.filters)
def country(self, country_id):
"""Add a country filter to the current Catalog instance.
Args:
country_id (str):
ID of the country to be used for filtering the Catalog.
Returns:
:py:class:`Catalog <cartoframes.data.observatory.Catalog>`
"""
self.filters[COUNTRY_FILTER] = country_id
return self
def category(self, category_id):
"""Add a category filter to the current Catalog instance.
Args:
category_id (str):
ID of the category to be used for filtering the Catalog.
Returns:
:py:class:`Catalog <cartoframes.data.observatory.Catalog>`
"""
self.filters[CATEGORY_FILTER] = category_id
return self
def geography(self, geography_id):
"""Add a geography filter to the current Catalog instance.
Args:
geography_id (str):
ID or slug of the geography to be used for filtering the Catalog
Returns:
:py:class:`Catalog <cartoframes.data.observatory.Catalog>`
"""
filter_value = geography_id
if is_slug_value(geography_id):
geography = Geography.get(geography_id)
filter_value = geography.id
self.filters[GEOGRAPHY_FILTER] = filter_value
return self
def provider(self, provider_id):
"""Add a provider filter to the current Catalog instance
Args:
provider_id (str):
ID of the provider to be used for filtering the Catalog.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
"""
self.filters[PROVIDER_FILTER] = provider_id
return self
def public(self, is_public=True):
"""Add a public filter to the current Catalog instance
Args:
is_public (str, optional):
Flag to filter public (True) or private (False) datasets. Default is True.
Returns:
:py:class:`CatalogList <cartoframes.data.observatory.entity.CatalogList>`
"""
self.filters[PUBLIC_FILTER] = 'true' if is_public else 'false'
return self
def clear_filters(self):
"""Remove the current filters from this Catalog instance."""
self.filters = {}
def subscriptions(self, credentials=None):
"""Get all the subscriptions in the Catalog. You'll get all the `Dataset` or `Geography` instances you have
previously subscribed to.
Args:
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
credentials of CARTO user account. If not provided,
                the default credentials (if set with :py:meth:`set_default_credentials
<cartoframes.auth.set_default_credentials>`) will be used.
Returns:
:py:class:`Subscriptions <cartoframes.data.observatory.Subscriptions>`
Raises:
CatalogError: if there's a problem when connecting to the catalog or no datasets are found.
"""
_credentials = get_credentials(credentials)
return Subscriptions(_credentials)
def datasets_filter(self, filter_dataset):
"""Get all the datasets in the Catalog filtered
Returns:
:py:class:`Dataset <cartoframes.data.observatory.Dataset>`
"""
return Dataset.get_datasets_spatial_filtered(filter_dataset)
def _global_message(self):
if self.filters and self.filters.get(COUNTRY_FILTER) != GLOBAL_COUNTRY_FILTER:
log.info('You can find more entities with the Global country filter. To apply that filter run:'
"\n\tCatalog().country('glo')")
| bsd-3-clause |
huongttlan/statsmodels | statsmodels/base/tests/test_data.py | 17 | 35047 | import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas
import pandas.util.testing as ptesting
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
#class TestDates(object):
# @classmethod
# def setupClass(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y'
cls.row_labels = None
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
def test_names(self):
data = self.data
np.testing.assert_equal(data.xnames, self.xnames)
np.testing.assert_equal(data.ynames, self.ynames)
def test_labels(self):
#HACK: because numpy master after NA stuff assert_equal fails on
# pandas indices
np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays2dEndog, cls).setupClass()
cls.endog = np.random.random((10,1))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
#cls.endog = endog.squeeze()
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
@classmethod
def setupClass(cls):
super(TestArrays1dExog, cls).setupClass()
cls.endog = np.random.random(10)
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:,None]
cls.xnames = ['x1']
cls.ynames = 'y'
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
class TestLists(TestArrays):
@classmethod
def setupClass(cls):
super(TestLists, cls).setupClass()
cls.endog = np.random.random(10).tolist()
cls.exog = np.c_[np.ones(10), np.random.random((10,2))].tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestRecarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float))
np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestStructarrays(TestArrays):
@classmethod
def setupClass(cls):
super(TestStructarrays, cls).setupClass()
cls.endog = np.random.random(9).view([('y_1',
'f8')]).view(np.recarray)
exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
('x_2', 'f8')]).view(np.recarray)
exog['const'] = 1
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.view(float))
np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestListDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10).tolist()
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2'])
exog.insert(0, 'const', 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = np.random.random(10)
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y'
cls.row_labels = cls.exog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog)
np.testing.assert_equal(self.data.exog, self.exog.values)
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x1','x2']) # names mimic defaults
exog.insert(0, 'const', 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = 'y_1'
cls.row_labels = cls.endog.index
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog)
def test_orig(self):
ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = pandas.Series(np.random.random(10), name='y_1')
exog = pandas.Series(np.random.random(10), name='x_1')
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index = [exog.name])
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index = exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = [exog.name],
columns = [exog.name])
cls.xnames = ['x_1']
cls.ynames = 'y_1'
cls.row_labels = cls.exog.index
def test_orig(self):
ptesting.assert_series_equal(self.data.orig_endog, self.endog)
ptesting.assert_series_equal(self.data.orig_exog, self.exog)
def test_endogexog(self):
np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
np.testing.assert_equal(self.data.exog, self.exog.values[:,None])
def test_alignment():
# regression test for issue #206
from statsmodels.regression.linear_model import OLS
from statsmodels.datasets.macrodata import load_pandas
d = load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they won't conform to lint
realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pandas.DataFrame(data)
# ambiguous which index should be used, so construction must raise
np.testing.assert_raises(ValueError, OLS, *(endog, exog))
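# Illustrative sketch (an addition, not original test code): handle_data on
# pandas inputs records the row/column labels so that wrap_output can restore
# them on bare ndarrays later.
def _example_wrap_output_sketch():
    exog = pandas.DataFrame(np.random.random((5, 2)), columns=['x_1', 'x_2'])
    endog = pandas.Series(np.random.random(5), name='y')
    data = sm_data.handle_data(endog, exog)
    wrapped = data.wrap_output(np.zeros(2), 'columns')
    # the bare array comes back as a Series indexed by the exog columns
    np.testing.assert_(list(wrapped.index) == ['x_1', 'x_2'])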
class TestMultipleEqsArrays(TestArrays):
@classmethod
def setupClass(cls):
cls.endog = np.random.random((10,4))
cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs,neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
cls.xnames = ['const', 'x1', 'x2']
cls.ynames = ['y1', 'y2', 'y3', 'y4']
cls.row_labels = None
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
self.col_result)
np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
self.cov_eq_result)
np.testing.assert_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setupClass(cls):
cls.endog = endog = pandas.DataFrame(np.random.random((10,4)),
columns=['y_1', 'y_2', 'y_3', 'y_4'])
exog = pandas.DataFrame(np.random.random((10,2)),
columns=['x_1','x_2'])
exog.insert(0, 'const', 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
cls.col_result = pandas.Series(cls.col_input,
index=exog.columns)
cls.row_input = np.random.random(nrows)
cls.row_result = pandas.Series(cls.row_input,
index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
cls.cov_result = pandas.DataFrame(cls.cov_input,
index = exog.columns,
columns = exog.columns)
cls.cov_eq_input = np.random.random((neqs, neqs))
cls.cov_eq_result = pandas.DataFrame(cls.cov_eq_input,
index=endog.columns,
columns=endog.columns)
cls.col_eq_input = np.random.random((nvars, neqs))
cls.col_eq_result = pandas.DataFrame(cls.col_eq_input,
index=exog.columns,
columns=endog.columns)
cls.xnames = ['const', 'x_1', 'x_2']
cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
ptesting.assert_series_equal(data.wrap_output(self.col_input,
'columns'),
self.col_result)
ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
self.row_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
self.cov_result)
ptesting.assert_frame_equal(data.wrap_output(self.cov_eq_input,
'cov_eq'),
self.cov_eq_result)
ptesting.assert_frame_equal(data.wrap_output(self.col_eq_input,
'columns_eq'),
self.col_eq_result)
class TestMissingArray(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = y, X
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:,idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
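# Illustrative sketch (an addition): with missing='drop', rows containing NaN
# in either endog or exog are removed before the arrays are stored.
def _example_missing_drop_sketch():
    y = np.array([1.0, np.nan, 3.0])
    x = np.array([[1.0], [2.0], [3.0]])
    data = sm_data.handle_data(y, x, 'drop')
    np.testing.assert_array_equal(data.endog, [1.0, 3.0])
    np.testing.assert_array_equal(data.exog, [[1.0], [3.0]])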
class TestMissingPandas(object):
@classmethod
def setupClass(cls):
X = np.random.random((25,4))
y = np.random.random(25)
y[10] = np.nan
X[2,3] = np.nan
X[14,2] = np.nan
cls.y, cls.X = pandas.Series(y), pandas.DataFrame(X)
def test_raise_no_missing(self):
# smoke test for #1700
sm_data.handle_data(pandas.Series(np.random.random(20)),
pandas.DataFrame(np.random.random((20, 2))),
'raise')
def test_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, self.X, 'raise'))
def test_drop(self):
y = self.y
X = self.X
combined = np.c_[y, X]
idx = ~np.isnan(combined).any(axis=1)
y = y.ix[idx]
X = X.ix[idx]
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
ptesting.assert_series_equal(data.orig_endog, self.y.ix[idx])
np.testing.assert_array_equal(data.exog, X.values)
ptesting.assert_frame_equal(data.orig_exog, self.X.ix[idx])
def test_none(self):
data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
np.testing.assert_array_equal(data.endog, self.y.values)
np.testing.assert_array_equal(data.exog, self.X.values)
def test_endog_only_raise(self):
np.testing.assert_raises(Exception, sm_data.handle_data,
(self.y, None, 'raise'))
def test_endog_only_drop(self):
y = self.y
y = y.dropna()
data = sm_data.handle_data(self.y, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_mv_endog(self):
y = self.X
y = y.ix[~np.isnan(y.values).any(axis=1)]
data = sm_data.handle_data(self.X, None, 'drop')
np.testing.assert_array_equal(data.endog, y.values)
def test_labels(self):
# rows 2, 10 and 14 contain NaN and are dropped
labels = pandas.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24])
data = sm_data.handle_data(self.y, self.X, 'drop')
np.testing.assert_(data.row_labels.equals(labels))
class TestConstant(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load_pandas
cls.data = load_pandas()
def test_array_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_constant(self):
exog = self.data.exog.copy()
exog['const'] = 1
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
def test_array_noconstant(self):
exog = self.data.exog.copy()
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 0)
np.testing.assert_equal(data.const_idx, None)
class TestHandleMissing(object):
def test_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_arrays(self):
arr = np.random.randn(20, 4)
arr[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = arr[:,0], arr[:,1:]
data, _ = sm_data.handle_missing(y, X, missing='drop')
bools_mask = np.ones(20, dtype=bool)
bools_mask[[2, 5, 10]] = False
y_exp = arr[bools_mask, 0]
X_exp = arr[bools_mask, 1:]
np.testing.assert_array_equal(data['endog'], y_exp)
np.testing.assert_array_equal(data['exog'], X_exp)
def test_pandas_array(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]].values
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
np.testing.assert_array_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
def test_array_pandas(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]].values, df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='drop')
df = df.dropna()
y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
np.testing.assert_array_equal(data['endog'], y_exp)
def test_noop(self):
df = ptesting.makeDataFrame()
df.values[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
data, _ = sm_data.handle_missing(y, X, missing='none')
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
ptesting.assert_frame_equal(data['exog'], X_exp)
ptesting.assert_series_equal(data['endog'], y_exp)
class CheckHasConstant(object):
def test_hasconst(self):
for x, result in zip(self.exogs, self.results):
mod = self.mod(self.y, x)
assert_equal(mod.k_constant, result[0]) #['k_constant'])
assert_equal(mod.data.k_constant, result[0])
if result[1] is None:
assert_(mod.data.const_idx is None)
else:
assert_equal(mod.data.const_idx, result[1])
# extra check after fit, some models raise on singular
fit_kwds = getattr(self, 'fit_kwds', {})
try:
res = mod.fit(**fit_kwds)
assert_equal(res.model.k_constant, result[0])
assert_equal(res.model.data.k_constant, result[0])
except:
pass
@classmethod
def setup_class(cls):
# create data
np.random.seed(0)
cls.y_c = np.random.randn(20)
cls.y_bin = (cls.y_c > 0).astype(int)
x1 = np.column_stack((np.ones(20), np.zeros(20)))
result1 = (1, 0)
x2 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5)).astype(float)
result2 = (1, None)
x3 = np.column_stack((np.arange(20), np.zeros(20)))
result3 = (0, None)
x4 = np.column_stack((np.arange(20), np.zeros((20, 2))))
result4 = (0, None)
x5 = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
result5 = (1, 1)
x5b = np.column_stack((np.arange(20), np.ones((20, 3))))
result5b = (1, 1)
x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
result5c = (1, 2)
# implicit and zero column
x6 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros(20))).astype(float)
result6 = (1, None)
x7 = np.column_stack((np.arange(20) < 10.5,
np.arange(20) > 10.5,
np.zeros((20, 2)))).astype(float)
result7 = (1, None)
cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
cls.results = (result1, result2, result3, result4, result5, result5b,
result5c, result6, result7)
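# Illustrative sketch (an addition, not original test code): the detection
# exercised above also holds for a directly constructed model -- a column that
# is constant but not equal to 1 (the x5 case) still counts as a constant.
def _example_constant_detection_sketch():
    from statsmodels.regression.linear_model import OLS
    y = np.random.randn(20)
    x = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
    mod = OLS(y, x)
    assert_equal(mod.k_constant, 1)
    assert_equal(mod.data.const_idx, 1)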
class TestHasConstantOLS(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.regression.linear_model import OLS
self.mod = OLS
self.y = self.y_c
class TestHasConstantGLM(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
self.mod = lambda y, x : GLM(y, x, family=families.Binomial())
self.y = self.y_bin
class TestHasConstantLogit(CheckHasConstant):
def __init__(self):
self.setup_class() # why does nose do it properly
from statsmodels.discrete.discrete_model import Logit
self.mod = Logit
self.y = self.y_bin
self.fit_kwds = {'disp': False}
def test_dtype_object():
# see #880
X = np.random.random((40,2))
df = pandas.DataFrame(X)
df[2] = np.random.randint(2, size=40).astype('object')
df['constant'] = 1
y = pandas.Series(np.random.randint(2, size=40))
np.testing.assert_raises(ValueError, sm_data.handle_data, y, df)
def test_formula_missing_extra_arrays():
np.random.seed(1)
# because patsy can't turn off missing data-handling as of 0.3.0, we need
# separate tests to make sure that missing values are handled correctly
# when going through formulas
# there is a handle_formula_data step
# then there is the regular handle_data step
# see 2083
# the untested cases are endog/exog have missing. extra has missing.
# endog/exog are fine. extra has missing.
# endog/exog do or do not have missing and extra has wrong dimension
y = np.random.randn(10)
y_missing = y.copy()
y_missing[[2, 5]] = np.nan
X = np.random.randn(10)
X_missing = X.copy()
X_missing[[1, 3]] = np.nan
weights = np.random.uniform(size=10)
weights_missing = weights.copy()
weights_missing[[6]] = np.nan
weights_wrong_size = np.random.randn(12)
data = {'y': y,
'X': X,
'y_missing': y_missing,
'X_missing': X_missing,
'weights': weights,
'weights_missing': weights_missing}
data = pandas.DataFrame.from_dict(data)
data['constant'] = 1
formula = 'y_missing ~ X_missing'
((endog, exog),
missing_idx, design_info) = handle_formula_data(data, None, formula,
depth=2,
missing='drop')
kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
'weights': data['weights_missing']}
model_data = sm_data.handle_data(endog, exog, **kwargs)
data_nona = data.dropna()
assert_equal(data_nona['y'].values, model_data.endog)
assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
assert_equal(data_nona['weights'].values, model_data.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
weights_2d = np.random.randn(10, 10)
weights_2d[[8, 7], [7, 8]] = np.nan #symmetric missing values
kwargs.update({'weights': weights_2d,
'missing_idx': missing_idx})
model_data2 = sm_data.handle_data(endog, exog, **kwargs)
good_idx = [0, 4, 6, 9]
assert_equal(data.ix[good_idx, 'y'], model_data2.endog)
assert_equal(data.ix[good_idx, ['constant', 'X']], model_data2.exog)
assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)
tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
(endog, exog), missing_idx, design_info = tmp
kwargs.update({'weights': weights_wrong_size,
'missing_idx': missing_idx})
assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
if __name__ == "__main__":
import nose
#nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# exit=False)
nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)
| bsd-3-clause |
mahmoudnabil/labr | python/labr.py | 1 | 21599 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 16:27:03 2013
@author: Mohamed Aly <[email protected]>
"""
import codecs
import numpy as np
import pandas as pd
import re
class LABR:
def __init__(self):
self.REVIEWS_PATH = "../data/labr_data/"
self.RAW_REVIEWS_FILE = "raw_reviews.tsv"
self.DELETED_REVIEWS_FILE = "deleted_reviews.tsv"
self.CLEAN_REVIEWS_FILE = "reviews.tsv"
self.CLEAN_NO_STOP_REVIEWS_FILE = "nostopwords_reviews"
self.CLEAN_NO_STOP_STEMMED_REVIEWS_FILE = "nostopwords_stemmed_reviews"
self.NORMALIZED_REVIEWS_FILE = "norm_reviews.tsv"
# Copied from the PyArabic package.
def arabicrange(self):
"""return a list of arabic characteres .
Return a list of characteres between \u060c to \u0652
@return: list of arabic characteres.
@rtype: unicode;
"""
mylist = [];
for i in range(0x0600, 0x00653):
try :
mylist.append(unichr(i));
except ValueError:
pass;
return mylist;
# cleans a single review
def clean_raw_review(self, body):
# patterns to remove first
pat = [\
(u'http[s]?://[a-zA-Z0-9_\-./~\?=%&]+', u''), # remove links
(u'www[a-zA-Z0-9_\-?=%&/.~]+', u''),
# u'\n+': u' ', # remove newlines
(u'<br />', u' '), # remove html line breaks
(u'</?[^>]+>', u' '), # remove html markup
# u'http': u'',
(u'[a-zA-Z]+\.org', u''),
(u'[a-zA-Z]+\.com', u''),
(u'://', u''),
(u'&[^;]+;', u' '),
(u':D', u':)'),
# (u'[0-9/]+', u''),
# u'[a-zA-Z.]+': u'',
# u'[^0-9' + u''.join(self.arabicrange()) + \
# u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
# u'\s^><\-_\u201D\u00AB=\u2026]+': u'', # remove latin characters
(u'\s+', u' '), # remove spaces
(u'\.+', u'.'), # multiple dots
(u'[\u201C\u201D]', u'"'),  # curly double quotes
(u'[\u2665\u2764]', u''), # heart symbol
(u'[\u00BB\u00AB]', u'"'),
(u'\u2013', u'-'), # dash
]
# patterns that disqualify a review
remove_if_there = [\
(u'[^0-9' + u''.join(self.arabicrange()) + \
u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
u'\s\^><\-_\u201D\u00AB=\u2026+|' + \
u'\u0660-\u066D\u201C\u201D' + \
u'\ufefb\ufef7\ufef5\ufef9]+', u''), # non arabic characters
]
# patterns that disqualify if empty after removing
remove_if_empty_after = [\
(u'[0-9a-zA-Z\-_]', u' '), # alpha-numeric
(u'[0-9' + u".,!;:$%&*%'#(){}~`\[\]/\\\\\"" + \
u'\s\^><`\-=_+]+', u''), # remove just punctuation
(u'\s+', u' '), # remove spaces
]
# remove again
# patterns to remove
pat2 = [\
# u'[^0-9' + u''.join(self.arabicrange()) + \
# u"!.,;:$%&*%'#(){}~`\[\]/\\\\\"" + \
# u'\s^><\-_\u201D\u00AB=\u2026]+': u'', # remove latin characters
]
skip = False
# if empty body, skip
if body == u'': skip = True
# do some substitutions
for k, v in pat:
body = re.sub(k, v, body)
# remove if exist
for k, v in remove_if_there:
if re.search(k, body):
skip = True
# remove if empty after replacing
for k, v in remove_if_empty_after:
temp = re.sub(k, v, body)
if temp == u" " or temp == u"":
skip = True
# do some more substitutions
if not skip:
for k, v in pat2:
body = re.sub(k, v, body)
# if empty string, skip
if body == u'' or body == u' ':
skip = True
if not skip:
return body
else:
return u""
# Read raw reviews from file, clean them, and write into the clean reviews file
def clean_raw_reviews(self):
# input file
in_file = codecs.open(self.REVIEWS_PATH + self.RAW_REVIEWS_FILE,
'r', encoding="utf-8")
reviews = in_file.readlines()
# Output file: rating<tab>content
out_file = open(self.REVIEWS_PATH + self.CLEAN_REVIEWS_FILE,
'w', buffering=100)
deleted_file = open(self.REVIEWS_PATH + self.DELETED_REVIEWS_FILE,
'w', buffering=100)
counter = 1
for i in xrange(0, len(reviews)):
review = reviews[i]
skip = False
# # If line starts with #, then skip
# if review[0] == u"#": continue
# split by <tab>
parts = review.split(u"\t")
# rating is first part and body is last part
rating = parts[0]
review_id = parts[1]
user_id = parts[2]
book_id = parts[3]
body = parts[4].strip()
# clean body
body = self.clean_raw_review(body)
if body == u"": skip = True
if i % 5000 == 0:
print "review %d:" % (i)
# write output
line = u"%s\t%s\t%s\t%s\t%s\n" % (rating, review_id, user_id,
book_id, body)
if not skip:
out_file.write(line.encode('utf-8'))
counter += 1
else:
deleted_file.write(line.encode('utf-8'))
# Read the reviews file. Returns a tuple containing these lists:
# rating: the rating 1 -> 5
# review_id: the id of the review
# user_id: the id of the user
# book_id: the id of the book
# body: the text of the review
def read_review_file(self, file_name):
reviews = codecs.open(file_name, 'r', 'utf-8').readlines()
# remove comment lines and newlines
reviews = [r.strip() for r in reviews if r[0] != u'#']
# parse
rating = list()
review_id = list()
user_id = list()
book_id = list()
body = list()
for review in reviews:
# split by <tab>
parts = review.split(u"\t")
# rating is first part and body is last part
rating.append(int(parts[0]))
review_id.append(parts[1])
user_id.append(parts[2])
book_id.append(parts[3])
if len(parts) > 4:
body.append(parts[4])
else:
body.append(u"")
return (rating, review_id, user_id, book_id, body)
# Writes reviews to a file
def write_review_file(self, file_name, rating, review_id, user_id,
book_id, body):
lines = list()
# loop
for i in xrange(len(rating)):
line = u"%s\t%s\t%s\t%s\t%s\n" % (rating[i], review_id[i],
user_id[i], book_id[i],
body[i])
lines.append(line)
open(file_name, 'w').write(u''.join(lines).encode('utf-8'))
def read_clean_reviews(self):
return self.read_review_file(self.REVIEWS_PATH +
self.CLEAN_REVIEWS_FILE)
def read_raw_reviews(self):
return self.read_review_file(self.REVIEWS_PATH + self.RAW_REVIEWS_FILE)
# Splits the dataset into training/test sets in the setting of using 5
# classes (predicting the rating value from 1 to 5)
def split_train_test_5class(self, rating, percent_test,
balanced="unbalanced"):
np.random.seed(1234)
num_reviews = len(rating)
review_ids = np.arange(0, num_reviews)
if balanced == "unbalanced":
ntest = np.floor(num_reviews * percent_test)
np.random.shuffle(review_ids)
test_ids = review_ids[:ntest]
train_ids = review_ids[ntest:]
elif balanced == "balanced":
(sizes, bins) = np.histogram(rating, [1, 2, 3, 4, 5, 6])
min_size = np.min(sizes)
print min_size
# sample review ids equally among classes
test_ids = np.zeros((0,), dtype="int32")
train_ids = np.zeros((0,), dtype="int32")
rating = np.array(rating)
ntest = np.floor(min_size * percent_test)
for c in range(1, 6):
cids = review_ids[np.nonzero(rating == c)]
np.random.shuffle(cids)
test_ids = np.r_[test_ids, cids[:ntest]]
train_ids = np.r_[train_ids, cids[ntest:min_size]]
train_file = self.REVIEWS_PATH + "5class-" + balanced + "-train.txt"
test_file = self.REVIEWS_PATH + "5class-" + balanced + "-test.txt"
open(train_file, 'w').write('\n'.join(map(str, train_ids)))
open(test_file, 'w').write('\n'.join(map(str, test_ids)))
return (train_ids, test_ids)
# Splits the dataset into training/test sets in the setting of using 2
# classes (predicting the polarity of the review where ratings 1 & 2
# are considered negative, ratings 4 & 5 are positive, and rating 3 is
# ignored)
def split_train_test_2class(self, rating, percent_test,
balanced="unbalanced"):
np.random.seed(1234)
rating = np.array(rating, dtype='int32')
# length
num_reviews = len(rating)
review_ids = np.arange(0, num_reviews)
# convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
rating[rating == 2] = 1
rating[rating == 4] = 5
ids = (rating == 1) + (rating == 5)
review_ids = review_ids[ids]
rating = rating[ids]
rating[rating == 1] = 0
rating[rating == 5] = 1
# get length after filtering
num_reviews = rating.shape[0]
if balanced == "unbalanced":
ntest = np.floor(num_reviews * percent_test)
np.random.shuffle(review_ids)
test_ids = review_ids[:ntest]
train_ids = review_ids[ntest:]
elif balanced == "balanced":
(sizes, bins) = np.histogram(rating, [0, 1, 2])
min_size = np.min(sizes)
print min_size
# sample review ids equally among classes
test_ids = np.zeros((0,), dtype="int32")
train_ids = np.zeros((0,), dtype="int32")
rating = np.array(rating)
ntest = np.floor(min_size * percent_test)
for c in [0, 1]:
cids = review_ids[np.nonzero(rating == c)]
np.random.shuffle(cids)
test_ids = np.r_[test_ids, cids[:ntest]]
train_ids = np.r_[train_ids, cids[ntest:min_size]]
train_file = self.REVIEWS_PATH + "2class-" + balanced + "-train.txt"
test_file = self.REVIEWS_PATH + "2class-" + balanced + "-test.txt"
open(train_file, 'w').write('\n'.join(map(str, train_ids)))
open(test_file, 'w').write('\n'.join(map(str, test_ids)))
return (train_ids, test_ids)
# Splits the dataset into training/validation/test sets in the setting of using 3
# classes (predicting the polarity of the review where ratings 1 & 2
# are considered negative, ratings 4 & 5 are positive, and rating 3 is considered
# neutral
def split_train_validation_test_3class(self, rating, percent_test, percent_valid,
balanced="unbalanced"):
np.random.seed(1234)
rating = np.array(rating, dtype='int32')
# length
num_reviews = len(rating)
review_ids = np.arange(0, num_reviews)
# convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
rating[rating == 2] = 1
rating[rating == 4] = 5
ids = (rating == 1) + (rating == 5) + (rating == 3)
review_ids = review_ids[ids]
rating = rating[ids]
rating[rating == 1] = 0
rating[rating == 5] = 1
rating[rating == 3] = 2
# get length after filtering
num_reviews = rating.shape[0]
if balanced == "unbalanced":
ntest = np.floor(num_reviews * percent_test)
nvalid = np.floor(num_reviews * percent_valid)
np.random.shuffle(review_ids)
test_ids = review_ids[:ntest]
validation_ids = review_ids[ntest:ntest + nvalid]
train_ids = review_ids[ntest + nvalid:]
elif balanced == "balanced":
(sizes, bins) = np.histogram(rating, [0, 1, 2, 3])
min_size = np.min(sizes)
print min_size
# sample review ids equally among classes
test_ids = np.zeros((0,), dtype="int32")
validation_ids = np.zeros((0,), dtype="int32")
train_ids = np.zeros((0,), dtype="int32")
rating = np.array(rating)
ntest = np.floor(min_size * percent_test)
nvalid = np.floor(min_size * percent_valid)
for c in [0, 1, 2]:
cids = review_ids[np.nonzero(rating == c)]
np.random.shuffle(cids)
test_ids = np.r_[test_ids, cids[:ntest]]
validation_ids = np.r_[validation_ids, cids[ntest:ntest + nvalid]]
train_ids = np.r_[train_ids, cids[ntest + nvalid:min_size]]
train_file = self.REVIEWS_PATH + "3class-" + balanced + "-train.txt"
test_file = self.REVIEWS_PATH + "3class-" + balanced + "-test.txt"
validation_file = self.REVIEWS_PATH + "3class-" + balanced + "-validation.txt"
open(train_file, 'w').write('\n'.join(map(str, train_ids)))
open(test_file, 'w').write('\n'.join(map(str, test_ids)))
open(validation_file, 'w').write('\n'.join(map(str, validation_ids)))
return (train_ids, test_ids)
# Reads a training or test file. The file contains the indices of the
# reviews from the clean reviews file.
def read_file(self, file_name):
ins = open(file_name).readlines()
ins = [int(i.strip()) for i in ins]
return ins
# A helper function.
def set_binary_klass(self, ar):
ar[(ar == 1) + (ar == 2)] = 0
ar[(ar == 4) + (ar == 5)] = 1
# A helper function.
def set_ternary_klass(self, ar):
ar[(ar == 1) + (ar == 2)] = 0
ar[(ar == 4) + (ar == 5)] = 1
ar[(ar == 3)] = 2
# Returns (train_x, train_y, test_x, test_y)
# where x is the review body and y is the rating (1->5 or 0->1)
def get_train_test(self, klass="2", balanced="balanced"):
(rating, a, b, c, body) = self.read_clean_reviews()
rating = np.array(rating)
body = pd.Series(body)
train_file = (self.REVIEWS_PATH + klass + "class-" +
balanced + "-train.txt")
test_file = (self.REVIEWS_PATH + klass + "class-" +
balanced + "-test.txt")
train_ids = self.read_file(train_file)
test_ids = self.read_file(test_file)
train_y = rating[train_ids]
test_y = rating[test_ids]
train_x = body[train_ids]
test_x = body[test_ids]
if klass == "2":
self.set_binary_klass(train_y)
self.set_binary_klass(test_y)
return (train_x, train_y, test_x, test_y)
# Returns (train_x, train_y, test_x, test_y, valid_x, valid_y)
# where x is the review body and y is the rating (1->5, 0/1, or 0/1/2)
def get_train_test_validation(self, klass="3", balanced="balanced"):
(rating, a, b, c, body) = self.read_clean_reviews()
rating = np.array(rating)
body = pd.Series(body)
train_file = (self.REVIEWS_PATH + klass + "class-" +
balanced + "-train.txt")
test_file = (self.REVIEWS_PATH + klass + "class-" +
balanced + "-test.txt")
validation_file = (self.REVIEWS_PATH + klass + "class-" +
balanced + "-validation.txt")
train_ids = self.read_file(train_file)
test_ids = self.read_file(test_file)
validation_ids = self.read_file(validation_file)
train_y = rating[train_ids]
test_y = rating[test_ids]
valid_y = rating[validation_ids]
train_x = body[train_ids]
test_x = body[test_ids]
valid_x = body[validation_ids]
if klass == "2":
self.set_binary_klass(train_y)
self.set_binary_klass(test_y)
self.set_binary_klass(valid_y)
elif klass == "3":
self.set_ternary_klass(train_y)
self.set_ternary_klass(test_y)
self.set_ternary_klass(valid_y)
return (train_x, train_y, test_x, test_y, valid_x, valid_y)
def split_train_validation_test_3class_tiny(self, rating, tiny_precent, percent_test, percent_valid,
balanced="unbalanced"):
np.random.seed(1234)
rating = np.array(rating, dtype='int32')
# length
num_reviews = len(rating)
review_ids = np.arange(0, num_reviews)
# convert to binary, with ratings [1, 2] --> neg and [4, 5] --> pos
rating[rating == 2] = 1
rating[rating == 4] = 5
ids = (rating == 1) + (rating == 5) + (rating == 3)
review_ids = review_ids[ids]
rating = rating[ids]
rating[rating == 1] = 0
rating[rating == 5] = 1
rating[rating == 3] = 2
# get length after filtering
num_reviews = rating.shape[0]
new_data_size = int(np.floor(tiny_precent * rating.shape[0]))
positive_reviews_precent = np.sum(rating == 1) * 1.0 / rating.shape[0]
negative_reviews_precent = np.sum(rating == 0) * 1.0 / rating.shape[0]
neutral_reviews_precent = np.sum(rating == 2) * 1.0 / rating.shape[0]
new_postive_size = np.round(positive_reviews_precent * tiny_precent * num_reviews)
new_negative_size = np.round(negative_reviews_precent * tiny_precent * num_reviews)
new_neutral_size = np.round(neutral_reviews_precent * tiny_precent * num_reviews)
np.random.shuffle(review_ids)
selected_ids = np.zeros(new_data_size,dtype='int32')
i=0
j=0
count_pos=0
count_neg=0
count_neutral=0
while(j<new_data_size):
if(rating[review_ids[i]]==1 and count_pos< new_postive_size):
selected_ids[j]=np.int(review_ids[i])
count_pos+=1
j+=1
elif(rating[review_ids[i]]==0 and count_neg< new_negative_size):
selected_ids[j]=np.int(review_ids[i])
count_neg+=1
j+=1
elif(rating[review_ids[i]]==2 and count_neutral< new_neutral_size):
selected_ids[j]=np.int(review_ids[i])
count_neutral+=1
j+=1
i+=1
if balanced == "unbalanced":
ntest = np.floor(new_data_size * percent_test)
nvalid = np.floor(new_data_size * percent_valid)
np.random.shuffle(selected_ids)
test_ids = selected_ids[:ntest]
validation_ids = selected_ids[ntest:ntest + nvalid]
train_ids = selected_ids[ntest + nvalid:]
elif balanced == "balanced":
(sizes, bins) = np.histogram(rating, [0, 1, 2, 3])
min_size = np.min(sizes)
print min_size
# sample review ids equally among classes
test_ids = np.zeros((0,), dtype="int32")
validation_ids = np.zeros((0,), dtype="int32")
train_ids = np.zeros((0,), dtype="int32")
rating = np.array(rating)
ntest = np.floor(min_size * percent_test)
nvalid = np.floor(min_size * percent_valid)
for c in [0, 1, 2]:
cids = selected_ids[np.nonzero(rating == c)]
np.random.shuffle(cids)
test_ids = np.r_[test_ids, cids[:ntest]]
validation_ids = np.r_[validation_ids, cids[ntest:ntest + nvalid]]
train_ids = np.r_[train_ids, cids[ntest + nvalid:min_size]]
train_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-train.txt"
test_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-test.txt"
validation_file = self.REVIEWS_PATH + "3class-" + balanced + "-tiny-validation.txt"
open(train_file, 'w').write('\n'.join(map(str, train_ids)))
open(test_file, 'w').write('\n'.join(map(str, test_ids)))
open(validation_file, 'w').write('\n'.join(map(str, validation_ids)))
return (train_ids, test_ids)
# l=LABR()
# (rating, a, b, c, body)=l.read_clean_reviews()
# l.split_train_validation_test_3class_tiny(rating,0.1, 0.2, 0.2)
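# Illustrative usage sketch (an addition for exposition; it assumes the cleaned
# reviews file and the "2class-balanced" split files already exist under
# REVIEWS_PATH, e.g. produced by clean_raw_reviews and split_train_test_2class):
# l = LABR()
# (train_x, train_y, test_x, test_y) = l.get_train_test(klass="2",
#                                                       balanced="balanced")
# train_x / test_x are pandas Series of review bodies; train_y / test_y are
# 0/1 polarity labels derived from the 1-5 star ratings.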
| gpl-2.0 |
ioam/holoviews | holoviews/plotting/mpl/renderer.py | 2 | 10857 | from __future__ import absolute_import, division, unicode_literals
import os
import sys
import base64
from io import BytesIO
from tempfile import NamedTemporaryFile
from contextlib import contextmanager
from itertools import chain
import param
import matplotlib as mpl
from matplotlib import pyplot as plt
from param.parameterized import bothmethod
from ...core import HoloMap
from ...core.options import Store
from ..renderer import Renderer, MIME_TYPES, HTML_TAGS
from .widgets import MPLSelectionWidget, MPLScrubberWidget
from .util import get_tight_bbox, mpl_version
class OutputWarning(param.Parameterized):pass
outputwarning = OutputWarning(name='Warning')
mpl_msg_handler = """
/* Backend specific body of the msg_handler, updates displayed frame */
var target = $('#fig_{plot_id}');
var img = $('<div />').html(msg);
target.children().each(function () {{ $(this).remove() }})
target.append(img)
"""
# <format name> : (animation writer, format, anim_kwargs, extra_args)
ANIMATION_OPTS = {
'webm': ('ffmpeg', 'webm', {},
['-vcodec', 'libvpx-vp9', '-b', '1000k']),
'mp4': ('ffmpeg', 'mp4', {'codec': 'libx264'},
['-pix_fmt', 'yuv420p']),
'gif': ('imagemagick', 'gif', {'fps': 10}, []),
'scrubber': ('html', None, {'fps': 5}, None)
}
if mpl_version >= '2.2':
ANIMATION_OPTS['gif'] = ('pillow', 'gif', {'fps': 10}, [])
class MPLRenderer(Renderer):
"""
Exporter used to render data from matplotlib, either to a stream
or directly to file.
The __call__ method renders a HoloViews component to raw data of
a specified matplotlib format. The save method is the
corresponding method for saving a HoloViews object to disk.
The save_fig and save_anim methods are used to save matplotlib
figure and animation objects. These match the two primary return
types of the plotting classes implemented with matplotlib.
"""
drawn = {}
backend = param.String('matplotlib', doc="The backend name.")
dpi=param.Integer(72, doc="""
The render resolution in dpi (dots per inch)""")
fig = param.ObjectSelector(default='auto',
objects=['png', 'svg', 'pdf', 'html', None, 'auto'], doc="""
Output render format for static figures. If None, no figure
rendering will occur. """)
holomap = param.ObjectSelector(default='auto',
objects=['widgets', 'scrubber', 'webm','mp4', 'gif', None, 'auto'], doc="""
Output render multi-frame (typically animated) format. If
None, no multi-frame rendering will occur.""")
interactive = param.Boolean(default=False, doc="""
Whether to enable interactive plotting, allowing plots to be
displayed by explicitly calling show().""")
mode = param.ObjectSelector(default='default', objects=['default'])
mode_formats = {'fig': {'default': ['png', 'svg', 'pdf', 'html', None, 'auto']},
'holomap': {'default': ['widgets', 'scrubber', 'webm','mp4', 'gif',
'html', None, 'auto']}}
counter = 0
# Define appropriate widget classes
widgets = {'scrubber': MPLScrubberWidget,
'widgets': MPLSelectionWidget}
# Define the handler for updating matplotlib plots
comm_msg_handler = mpl_msg_handler
def __call__(self, obj, fmt='auto'):
"""
Render the supplied HoloViews component or MPLPlot instance
using matplotlib.
"""
plot, fmt = self._validate(obj, fmt)
if plot is None: return
if isinstance(plot, tuple(self.widgets.values())):
data = plot()
else:
with mpl.rc_context(rc=plot.fig_rcparams):
data = self._figure_data(plot, fmt, **({'dpi':self.dpi} if self.dpi else {}))
data = self._apply_post_render_hooks(data, obj, fmt)
return data, {'file-ext':fmt,
'mime_type':MIME_TYPES[fmt]}
def show(self, obj):
"""
Renders the supplied object and displays it using the active
GUI backend.
"""
if self.interactive:
if isinstance(obj, list):
return [self.get_plot(o) for o in obj]
return self.get_plot(obj)
from .plot import MPLPlot
MPLPlot._close_figures = False
try:
plots = []
objects = obj if isinstance(obj, list) else [obj]
for o in objects:
plots.append(self.get_plot(o))
plt.show()
except:
raise
finally:
MPLPlot._close_figures = True
return plots[0] if len(plots) == 1 else plots
@classmethod
def plot_options(cls, obj, percent_size):
"""
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
"""
from .plot import MPLPlot
factor = percent_size / 100.0
obj = obj.last if isinstance(obj, HoloMap) else obj
options = Store.lookup_options(cls.backend, obj, 'plot').options
fig_size = options.get('fig_size', MPLPlot.fig_size)*factor
return dict({'fig_size':fig_size},
**MPLPlot.lookup_options(obj, 'plot').options)
@bothmethod
def get_size(self_or_cls, plot):
w, h = plot.state.get_size_inches()
dpi = self_or_cls.dpi if self_or_cls.dpi else plot.state.dpi
return (int(w*dpi), int(h*dpi))
def diff(self, plot):
"""
Returns the latest plot data to update an existing plot.
"""
if self.fig == 'auto':
figure_format = self.params('fig').objects[0]
else:
figure_format = self.fig
return self.html(plot, figure_format)
def _figure_data(self, plot, fmt='png', bbox_inches='tight', as_script=False, **kwargs):
"""
Render matplotlib figure object and return the corresponding
data. If as_script is True, the content will be split in an
HTML and a JS component.
Similar to IPython.core.pylabtools.print_figure but without
any IPython dependency.
"""
if fmt in ['gif', 'mp4', 'webm']:
if sys.version_info[0] == 3 and mpl.__version__[:-2] in ['1.2', '1.3']:
raise Exception("<b>Python 3 matplotlib animation support broken <= 1.3</b>")
with mpl.rc_context(rc=plot.fig_rcparams):
anim = plot.anim(fps=self.fps)
data = self._anim_data(anim, fmt)
else:
fig = plot.state
traverse_fn = lambda x: x.handles.get('bbox_extra_artists', None)
extra_artists = list(chain(*[artists for artists in plot.traverse(traverse_fn)
if artists is not None]))
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=self.dpi,
bbox_inches=bbox_inches,
bbox_extra_artists=extra_artists
)
kw.update(kwargs)
# Attempts to precompute the tight bounding box
try:
kw = self._compute_bbox(fig, kw)
except:
pass
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if as_script:
b64 = base64.b64encode(data).decode("utf-8")
(mime_type, tag) = MIME_TYPES[fmt], HTML_TAGS[fmt]
src = HTML_TAGS['base64'].format(mime_type=mime_type, b64=b64)
html = tag.format(src=src, mime_type=mime_type, css='')
return html, ''
if fmt == 'svg':
data = data.decode('utf-8')
return data
def _anim_data(self, anim, fmt):
"""
Render a matplotlib animation object and return the corresponding data.
"""
(writer, _, anim_kwargs, extra_args) = ANIMATION_OPTS[fmt]
if extra_args != []:
anim_kwargs = dict(anim_kwargs, extra_args=extra_args)
if self.fps is not None: anim_kwargs['fps'] = max([int(self.fps), 1])
if self.dpi is not None: anim_kwargs['dpi'] = self.dpi
if not hasattr(anim, '_encoded_video'):
# Windows will throw PermissionError with auto-delete
with NamedTemporaryFile(suffix='.%s' % fmt, delete=False) as f:
anim.save(f.name, writer=writer, **anim_kwargs)
video = f.read()
f.close()
os.remove(f.name)
return video
def _compute_bbox(self, fig, kw):
"""
Compute the tight bounding box for each figure once, reducing
number of required canvas draw calls from N*2 to N+1 as a
function of the number of frames.
Tight bounding box computing code here mirrors:
matplotlib.backend_bases.FigureCanvasBase.print_figure
as it hasn't been factored out as a function.
"""
fig_id = id(fig)
if kw['bbox_inches'] == 'tight':
if not fig_id in MPLRenderer.drawn:
fig.set_dpi(self.dpi)
fig.canvas.draw()
extra_artists = kw.pop("bbox_extra_artists", [])
pad = mpl.rcParams['savefig.pad_inches']
bbox_inches = get_tight_bbox(fig, extra_artists, pad=pad)
MPLRenderer.drawn[fig_id] = bbox_inches
kw['bbox_inches'] = bbox_inches
else:
kw['bbox_inches'] = MPLRenderer.drawn[fig_id]
return kw
@classmethod
@contextmanager
def state(cls):
deprecated = ['text.latex.unicode', 'examples.directory']
old_rcparams = {k: mpl.rcParams[k] for k in mpl.rcParams.keys()
if mpl_version < '3.0' or k not in deprecated}
try:
cls._rcParams = old_rcparams
yield
finally:
mpl.rcParams.clear()
mpl.rcParams.update(cls._rcParams)
@classmethod
def load_nb(cls, inline=True):
"""
Initialize matplotlib backend
"""
import matplotlib.pyplot as plt
backend = plt.get_backend()
if backend not in ['agg', 'module://ipykernel.pylab.backend_inline']:
plt.switch_backend('agg')
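def _example_render_sketch():
    # Illustrative usage sketch (an addition, not part of the renderer module).
    # It assumes the public hv.renderer accessor resolves to this class for the
    # 'matplotlib' backend.
    import holoviews as hv
    renderer = hv.renderer('matplotlib')
    png_bytes, info = renderer(hv.Curve([1, 2, 3]), fmt='png')
    return png_bytes, info['mime_type']  # e.g. 'image/png'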
| bsd-3-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/pickle.py | 15 | 1656 | from pandas.compat import cPickle as pkl, pickle_compat as pc, PY3
def to_pickle(obj, path):
"""
Pickle (serialize) the object to the given file path
Parameters
----------
obj : any object
path : string
File path
"""
with open(path, 'wb') as f:
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
def read_pickle(path):
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file
"""
def try_read(path, encoding=None):
# Try cPickle first; if that fails, retry with the patched pickle, and
# finally with the compat pickle, which handles subclass changes (GH 6899).
# Pass encoding only if it is not None, since py2 doesn't handle the param.
try:
with open(path, 'rb') as fh:
return pkl.load(fh)
except (Exception) as e:
# reg/patched pickle
try:
with open(path, 'rb') as fh:
return pc.load(fh, encoding=encoding, compat=False)
# compat pickle
except:
with open(path, 'rb') as fh:
return pc.load(fh, encoding=encoding, compat=True)
try:
return try_read(path)
except:
if PY3:
return try_read(path, encoding='latin1')
raise
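def _example_roundtrip_sketch():
    # Illustrative sketch (an addition, not part of the original module):
    # objects written with to_pickle are recovered unchanged by read_pickle.
    import os
    import tempfile
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3]})
    path = os.path.join(tempfile.mkdtemp(), 'frame.pkl')
    to_pickle(df, path)
    assert read_pickle(path).equals(df)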
| gpl-2.0 |
SteveDiamond/cvxpy | cvxpy/cvxcore/tests/python/364A_scripts/spacecraft_landing_data.py | 4 | 1833 | import numpy as np
from cvxpy import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import time
TIME = 0
h = 1.
g = 0.1
m = 10.
Fmax = 10.
p0 = np.matrix('50 ;50; 100')
v0 = np.matrix('-10; 0; -10')
alpha = 0.5
gamma = 1.
K = 35
ANSWERS = []
ps = []
val = 0
while val != float('inf'):
pass #print "K=", K
v = Variable( 3, K)
p = Variable( 3, K)
f = Variable( 3, K)
# Minimizing fuel
obj_sum = norm( f[:,0] )
for i in range(1,K):
obj_sum += norm( f[:,i] )
obj = Minimize(obj_sum)
constraints = [v[:,0] == v0, p[:,0] == p0, p[:,K-1] == 0, v[:,K-1] == 0]
for i in range(K):
constraints.append( norm( f[:,i] ) <= Fmax )
for i in range(1, K):
constraints.append( v[:,i] == v[:, i - 1] + (h/m) * f[:, i - 1] - h * g * np.array([0, 0, 1]) )
constraints.append( p[:,i] == p[:,i - 1] + h * (h/2) * (v[:,i] + v[:,i - 1]) )
for i in range(K):
constraints.append( p[2,i] >= alpha * norm( p[1:,i]) )
prob = Problem(obj, constraints)
ps.append(p)
tic = time.time()
val = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(val)
K -= 1
pass #print toc - tic
p = ps[-2]  # last p that was feasible
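# Summary of the search above (an added illustration): K starts at 35 and is
# decremented until the problem becomes infeasible, so the smallest feasible
# time horizon corresponds to the next-to-last solve.
min_feasible_K = 35 - (len(ANSWERS) - 2)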
# use the following code to plot your trajectories
# and the glide cone (don't modify)
# -------------------------------------------------------
# fig = pass #plt.figure()
# ax = fig.gca(projection='3d')
X = np.linspace(-40, 55, num=30)
Y = np.linspace(0, 55, num=30)
X, Y = np.meshgrid(X, Y)
Z = alpha*np.sqrt(X**2+Y**2)
# ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)
# Have your solution stored in p
# ax.plot(xs=p.value[0,:].A1,ys=p.value[1,:].A1,zs=p.value[2,:].A1)
# ax.set_xlabel('x'); ax.set_ylabel('y'); ax.set_zlabel('z')
pass #plt.title('Minimum time path ')
pass #plt.show()
| gpl-3.0 |
ringw/MetaOMR | metaomr/midi_alignment.py | 1 | 4155 | import numpy as np
import pandas as pd
import skimage.measure
import scipy.spatial.distance as ssd
from metaomr import bitimage
from glob import glob
import os.path
import scipy.misc
from music21 import converter, meter
from collections import defaultdict
RESOLUTION = (2 * 3) ** 6 * (5 * 7)
def midi_beats(midi_file):
music = converter.parse(midi_file)
# Use dummy 1/4 time
for part in music.parts:
for sig in part.getTimeSignatures():
denom = max(4, sig.denominator)
part.insert(sig.offset, meter.TimeSignature('1/%d' % denom))
part.remove(sig)
measures = []
for measure in music.flat.makeMeasures():
offsetMap = defaultdict(set)
for note in measure.notes:
offset = int(round(note.offset * RESOLUTION))
offsetMap[offset].update([pitch.midi for pitch in note.pitches])
measures.append(offsetMap)
return measures
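# The list returned above holds one dict per measure, mapping a quantised beat
# offset (note.offset in quarter lengths, scaled by RESOLUTION) to the set of
# MIDI pitch numbers starting at that offset.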
def measure_beat_dists(ma, mb):
scores = np.empty((len(ma), len(mb)), float)
notes_a = np.array([sum(map(len, ma[i].values())) for i in xrange(len(ma))])
notes_b = np.array([sum(map(len, mb[j].values())) for j in xrange(len(mb))])
for i in xrange(len(ma)):
for j in xrange(len(mb)):
numnotes_a = notes_a[i]
numnotes_b = notes_b[j]
a_in_b = 0
for offset in ma[i]:
if offset in mb[j]:
a_in_b += len(ma[i][offset].intersection(mb[j][offset]))
b_in_a = 0
for offset in mb[j]:
if offset in ma[i]:
b_in_a += len(mb[j][offset].intersection(ma[i][offset]))
if numnotes_a and numnotes_b:
sens = float(a_in_b) / numnotes_a
spec = float(b_in_a) / numnotes_b
F1 = (2 * sens * spec / (sens + spec)
if sens or spec else 0.0)
scores[i, j] = F1
elif numnotes_a == numnotes_b == 0:
scores[i, j] = 1
else:
scores[i, j] = 0
dists = 1 - scores
dists *= np.maximum(notes_a[:, None], notes_b[None, :])
return dists
def align_measures(ma, mb, gap_penalty=10):
dists = measure_beat_dists(ma, mb)
scores = np.empty((len(ma), len(mb)))
scores[0, 0] = dists[0, 0]
for i in xrange(len(ma)):
scores[i, 0] = i * gap_penalty
for j in xrange(len(mb)):
scores[0, j] = j * gap_penalty
dx = np.array([-1, -1, 0], int)
dy = np.array([-1, 0, -1], int)
ptr = np.empty_like(scores, int)
ptr[0, 0] = 0
ptr[1:, 0] = 2
ptr[0, 1:] = 1
for i in xrange(1, len(ma)):
for j in xrange(1, len(mb)):
new_scores = scores[i + dy, j + dx]
new_scores[0] += dists[i, j]
new_scores[1:] += gap_penalty
ptr[i, j] = np.argmin(new_scores)
scores[i, j] = new_scores[ptr[i, j]]
score = scores[i, j]
alignment = []
while i >= 0 and j >= 0:
direction = ptr[i, j]
alignment.append((i if direction != 1 else -1,
j if direction != 2 else -1,
dists[i, j] if direction == 0 else gap_penalty))
i += dy[direction]
j += dx[direction]
alignment = alignment[::-1]
alignment = pd.DataFrame(alignment, columns='a b score'.split())
notes_a = np.array([sum(map(len, ma[i].values())) for i in xrange(len(ma))])
notes_b = np.array([sum(map(len, mb[j].values())) for j in xrange(len(mb))])
alignment['nn_a'] = notes_a[np.array(alignment['a'], int)]
alignment['nn_b'] = notes_b[np.array(alignment['b'], int)]
a_in_b = [sum([len(ma[i][offset].intersection(mb[j][offset]))
if offset in mb[j] else 0
for offset in ma[i]])
for i, j in np.array(alignment[['a', 'b']])]
b_in_a = [sum([len(mb[j][offset].intersection(ma[i][offset]))
if offset in ma[i] else 0
for offset in mb[j]])
for i, j in np.array(alignment[['a', 'b']])]
alignment['a_in_b'] = a_in_b
alignment['b_in_a'] = b_in_a
return alignment
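def _example_alignment_sketch(midi_a='reference.mid', midi_b='performance.mid'):
    # Illustrative sketch (an addition; the two file names are placeholders,
    # not files shipped with the project): extract per-measure beat maps and
    # align the two measure sequences. A larger gap_penalty discourages
    # skipping measures relative to tolerating note-level mismatch.
    ma = midi_beats(midi_a)
    mb = midi_beats(midi_b)
    alignment = align_measures(ma, mb, gap_penalty=10)
    # columns 'a' and 'b' hold aligned measure indices (-1 marks a gap)
    return alignment[['a', 'b', 'score']]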
| gpl-3.0 |
cjayb/mne-python | mne/viz/tests/test_circle.py | 14 | 5024 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
import numpy as np
import pytest
import matplotlib.pyplot as plt
from mne.viz import plot_connectivity_circle, circular_layout
def test_plot_connectivity_circle():
"""Test plotting connectivity circle."""
node_order = ['frontalpole-lh', 'parsorbitalis-lh',
'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
'medialorbitofrontal-lh', 'parstriangularis-lh',
'rostralanteriorcingulate-lh', 'temporalpole-lh',
'parsopercularis-lh', 'caudalanteriorcingulate-lh',
'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
'caudalmiddlefrontal-lh', 'superiortemporal-lh',
'parahippocampal-lh', 'middletemporal-lh',
'inferiortemporal-lh', 'precentral-lh',
'transversetemporal-lh', 'posteriorcingulate-lh',
'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
'superiorparietal-lh', 'pericalcarine-lh',
'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
'lateraloccipital-rh', 'pericalcarine-rh',
'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
'fusiform-rh', 'posteriorcingulate-rh',
'transversetemporal-rh', 'precentral-rh',
'inferiortemporal-rh', 'middletemporal-rh',
'parahippocampal-rh', 'superiortemporal-rh',
'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
'entorhinal-rh', 'caudalanteriorcingulate-rh',
'parsopercularis-rh', 'temporalpole-rh',
'rostralanteriorcingulate-rh', 'parstriangularis-rh',
'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
'lateralorbitofrontal-rh', 'parsorbitalis-rh',
'frontalpole-rh']
label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
'inferiorparietal-lh', 'inferiorparietal-rh',
'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
'lateraloccipital-lh', 'lateraloccipital-rh',
'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
'medialorbitofrontal-rh', 'middletemporal-lh',
'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
'parahippocampal-lh', 'parahippocampal-rh',
'parsopercularis-lh', 'parsopercularis-rh',
'parsorbitalis-lh', 'parsorbitalis-rh',
'parstriangularis-lh', 'parstriangularis-rh',
'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
'postcentral-rh', 'posteriorcingulate-lh',
'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
'precuneus-lh', 'precuneus-rh',
'rostralanteriorcingulate-lh',
'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
'superiorfrontal-rh', 'superiorparietal-lh',
'superiorparietal-rh', 'superiortemporal-lh',
'superiortemporal-rh', 'supramarginal-lh',
'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
'transversetemporal-lh', 'transversetemporal-rh']
group_boundaries = [0, len(label_names) / 2]
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=group_boundaries)
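    # circular_layout returns one angle in degrees per entry of label_names,
    # arranged according to node_order, with extra spacing inserted at the
    # group_boundaries indices (here: between the two hemispheres).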
con = np.random.RandomState(0).randn(68, 68)
plot_connectivity_circle(con, label_names, n_lines=300,
node_angles=node_angles, title='test',
)
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[-1])
pytest.raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[20, 0])
plt.close('all')
| bsd-3-clause |
sjperkins/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 58 | 71789 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
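  # Only classes 0 and 1 are kept (50 samples each), yielding 100 rows with
  # binary labels.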
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
  def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
      # For the case of binary classification, the 2nd column of "predictions"
      # denotes the probability of the positive class.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
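      # end_mask=1 ignores the end index for dimension 0, so all rows are
      # kept; the result has shape [batch_size, 1] and holds the probability
      # of the positive class.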
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
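    # A very large l1_regularization_strength pushes the weights towards zero,
    # which increases the data loss relative to the unregularized model.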
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
    # where two 1-dimensional dense features have been replaced by a single
    # 2-dimensional feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
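    # bucketized_column one-hot encodes a real valued column into
    # len(boundaries) + 1 buckets; e.g. boundaries=[500.0, 700.0] yields the
    # buckets (-inf, 500), [500, 700) and [700, inf).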
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
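    # weighted_sparse_column pairs each 'country' id with the matching value
    # from the 'price' SparseTensor, so the prices act as per-entry weights
    # for the country feature.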
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
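    # crossed_column hashes the cross product of the 'language' and 'country'
    # values into hash_bucket_size buckets, producing one sparse feature per
    # (language, country) combination.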
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
    regressor = linear.LinearRegressor(feature_columns=[age, language])
    regressor.fit(input_fn=input_fn, steps=100)
    loss1 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    regressor.fit(input_fn=input_fn, steps=200)
    loss2 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
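    # y is a noisy linear function of x plus a bias of roughly 2, so the
    # fitted weights should approximately recover `weights`.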
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
          # LinearRegressor requires at least one feature column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
        [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
saiwing-yeung/scikit-learn | sklearn/covariance/robust_covariance.py | 105 | 29653 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
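Examples
--------
A minimal usage sketch on synthetic data (illustrative only; ``c_step`` is
imported from this module directly since it is not re-exported, see the
note above, and the returned tuple is left unpacked here):
>>> import numpy as np
>>> from sklearn.covariance.robust_covariance import c_step
>>> X = np.random.RandomState(0).randn(100, 3)
>>> results = c_step(X, n_support=60, random_state=0) # doctest: +SKIP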
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
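Examples
--------
A minimal usage sketch on synthetic data (illustrative only; the returned
tuple of candidate locations, covariances, supports and distances is left
unpacked here):
>>> import numpy as np
>>> from sklearn.covariance.robust_covariance import select_candidates
>>> X = np.random.RandomState(0).randn(100, 3)
>>> results = select_candidates(X, n_support=60, n_trials=10,
... select=2, random_state=0) # doctest: +SKIP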
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
dist : array-like, shape (n_samples,)
The Mahalanobis distances of the observations, computed with the
returned robust location and covariance estimates.
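Examples
--------
A minimal usage sketch on synthetic data (illustrative only; the unpacking
follows the ``return`` statement at the end of this function):
>>> import numpy as np
>>> from sklearn.covariance import fast_mcd
>>> X = np.random.RandomState(42).randn(200, 2)
>>> location, covariance, support, dist = fast_mcd(
... X, random_state=42) # doctest: +SKIP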
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
# The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is almost, but not exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
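Examples
--------
A minimal fitting sketch on synthetic two-dimensional data (illustrative
only; outputs are not reproduced here):
>>> import numpy as np
>>> from sklearn.covariance import MinCovDet
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal([0., 0.], [[1., .3], [.3, 1.]], size=200)
>>> mcd = MinCovDet(random_state=0).fit(X) # doctest: +SKIP
>>> mcd.location_, mcd.covariance_ # doctest: +SKIP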
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
tanayz/Kaggle | HB_ML_Challenge/test_xgboost.py | 1 | 4623 | import csv
import sys
import numpy as np
import scipy as sp
import xgboost as xgb
import sklearn.cross_validation as cv
def AMS(s, b):
'''
Approximate median significance:
s = sum of weights of true positives (signal events classified as signal)
b = sum of weights of false positives (background events classified as signal)
'''
assert s >= 0
assert b >= 0
bReg = 10.
return np.sqrt(2.0 * ((s + b + bReg) * np.log(1 + s / (b + bReg)) - s))
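# Worked example (illustrative numbers only): with s = 10. and b = 100.,
# AMS = sqrt(2 * ((10 + 100 + 10) * log(1 + 10 / 110.) - 10)) ~= 0.94.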
def get_rates(prediction, solution, weights):
'''
Returns the weighted true and false positive counts (sums of example weights).
This assumes that:
label 's' corresponds to 1 (int)
label 'b' corresponds to 0 (int)
'''
assert prediction.size == solution.size
assert prediction.size == weights.size
# Compute sum of weights for true and false positives
truePos = sum(weights[(solution == 1) * (prediction == 1)])
falsePos = sum(weights[(solution == 0) * (prediction == 1)])
return truePos, falsePos
def get_training_data(training_file):
'''
Loads training data.
'''
data = list(csv.reader(open(training_file, "rb"), delimiter=','))
X = np.array([map(float, row[1:-2]) for row in data[1:]])
labels = np.array([int(row[-1] == 's') for row in data[1:]])
weights = np.array([float(row[-2]) for row in data[1:]])
return X, labels, weights
def estimate_performance_xgboost(training_file, param, num_round, folds):
'''
Cross validation for XGBoost performance
'''
# Load training data
X, labels, weights = get_training_data(training_file)
# Cross validate
kf = cv.KFold(labels.size, n_folds=folds)
npoints = 6
# Dictionary to store all the AMSs
all_AMS = {}
for curr in range(npoints):
all_AMS[curr] = []
# These are the cutoffs used for the XGBoost predictions
cutoffs = sp.linspace(0.05, 0.30, npoints)
for train_indices, test_indices in kf:
X_train, X_test = X[train_indices], X[test_indices]
y_train, y_test = labels[train_indices], labels[test_indices]
w_train, w_test = weights[train_indices], weights[test_indices]
# Rescale weights so that their sum is the same as for the entire training set
w_train *= (sum(weights) / sum(w_train))
w_test *= (sum(weights) / sum(w_test))
sum_wpos = sum(w_train[y_train == 1])
sum_wneg = sum(w_train[y_train == 0])
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix(X_train, label=y_train, missing=-999.0, weight=w_train)
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg / sum_wpos
# you can directly throw param in, though we want to watch multiple metrics here
plst = param.items()#+[('eval_metric', '[email protected]')]
watchlist = []#[(xgmat, 'train')]
bst = xgb.train(plst, xgmat, num_round, watchlist)
# Construct matrix for test set
xgmat_test = xgb.DMatrix(X_test, missing=-999.0)
y_out = bst.predict(xgmat_test)
res = [(i, y_out[i]) for i in xrange(len(y_out))]
rorder = {}
for k, v in sorted(res, key = lambda x:-x[1]):
rorder[k] = len(rorder) + 1
# Explore changing threshold_ratio and compute AMS
best_AMS = -1.
for curr, threshold_ratio in enumerate(cutoffs):
y_pred = sp.zeros(len(y_out))
ntop = int(threshold_ratio * len(rorder))
for k, v in res:
if rorder[k] <= ntop:
y_pred[k] = 1
truePos, falsePos = get_rates(y_pred, y_test, w_test)
this_AMS = AMS(truePos, falsePos)
all_AMS[curr].append(this_AMS)
if this_AMS > best_AMS:
best_AMS = this_AMS
print "Best AMS =", best_AMS
print "------------------------------------------------------"
for curr, cut in enumerate(cutoffs):
print "Thresh = %.2f: AMS = %.4f, std = %.4f" % \
(cut, sp.mean(all_AMS[curr]), sp.std(all_AMS[curr]))
print "------------------------------------------------------"
def main():
# setup parameters for xgboost
param = {}
# use logistic regression loss, use raw prediction before logistic transformation
# since we only need the rank
param['objective'] = 'binary:logitraw'
param['bst:eta'] = 0.1
param['bst:max_depth'] = 6
param['eval_metric'] = 'auc'
param['silent'] = 1
param['nthread'] = 1
num_round = 120 # Number of boosted trees
folds = 5 # Folds for CV
estimate_performance_xgboost("training.csv", param, num_round, folds)
if __name__ == "__main__":
main()
| apache-2.0 |
cbmoore/statsmodels | statsmodels/datasets/nile/data.py | 25 | 1872 | """Nile River Flows."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Nile River flows at Ashwan 1871-1970"""
SOURCE = """
This data is first analyzed in:
Cobb, G. W. 1978. "The Problem of the Nile: Conditional Solution to a
Changepoint Problem." *Biometrika*. 65.2, 243-51.
"""
DESCRSHORT = """This dataset contains measurements on the annual flow of
the Nile as measured at Aswan for 100 years from 1871-1970."""
DESCRLONG = DESCRSHORT + " There is an apparent changepoint near 1898."
#suggested notes
NOTE = """::
Number of observations: 100
Number of variables: 2
Variable name definitions:
year - the year of the observations
volume - the discharge at Aswan in 10^8 m^3
"""
from numpy import recfromtxt, array
from pandas import Series, DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the Nile data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
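Examples
--------
A quick usage sketch (assuming the usual ``statsmodels.datasets.nile``
package layout this module lives in):
>>> from statsmodels.datasets import nile
>>> dataset = nile.load()
>>> dataset.endog[:5] # doctest: +SKIP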
"""
data = _get_data()
names = list(data.dtype.names)
endog_name = 'volume'
endog = array(data[endog_name], dtype=float)
dataset = Dataset(data=data, names=[endog_name], endog=endog,
endog_name=endog_name)
return dataset
def load_pandas():
data = DataFrame(_get_data())
# TODO: time series
endog = Series(data['volume'], index=data['year'].astype(int))
dataset = Dataset(data=data, names=list(data.columns),
endog=endog, endog_name='volume')
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/nile.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
huobaowangxi/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
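# For an AR(1) process with coefficient r, cov[i, j] = r ** |i - j|, i.e. the
# Toeplitz matrix built from the first row r ** [0, 1, ..., n_features - 1].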
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
askalbania/piernik | python/plot_tsl.py | 1 | 1065 | #!/usr/bin/python
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
import argparse
remove_comments = re.compile("(?!\#)", re.VERBOSE)
parser = argparse.ArgumentParser()
parser.add_argument("-f", nargs=1, default=None)
parser.add_argument("files", nargs='*')
args = parser.parse_args()
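# Typical invocation (hypothetical field/file names):
# ./plot_tsl.py -f dt_hydro run1.tsl run2.tsl
# plots the chosen column of every tsl file against its second column, which
# is used as the x axis below.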
if len(args.files) < 1:
parser.error("I need at least one tsl file")
data = []
for fn in args.files:
f = open(fn,"rb")
tab = [line.strip() for line in f.readlines()]
f.close()
header = np.array(tab[0][1:].split())
if args.f is None:
print("The following fields are available in %s" % fn)
print(header)
else:
field = args.f[0]
fno = np.where(header == field)[0][0]
tab = np.array([
map(np.float64, line.split()) for line in filter(remove_comments.match, tab)
])
data.append(tab)
fig = plt.figure()
ax = fig.add_subplot(111)
for i, fn in enumerate(data):
ax.plot(fn[:, 1], fn[:, fno], label=args.files[i])
ax.legend()
plt.ylabel(field)
plt.xlabel(header[1])
plt.draw()
plt.show()
| gpl-3.0 |
MicrosoftGenomics/FaST-LMM | fastlmm/association/tests/test_gwas.py | 1 | 16451 | import numpy as np
import scipy as sp
import logging
from scipy import stats
from fastlmm.pyplink.snpreader.Bed import Bed
#from fastlmm.association.gwas import LeaveOneChromosomeOut, LocoGwas, FastGwas, load_intersect
from fastlmm.association.LeaveOneChromosomeOut import LeaveOneChromosomeOut
from fastlmm.association.PrecomputeLocoPcs import load_intersect
from fastlmm.association.LocoGwas import FastGwas, LocoGwas
from fastlmm.util import run_fastlmmc
from fastlmm.inference import LMM
import unittest
import os.path
import time
currentFolder = os.path.dirname(os.path.realpath(__file__))
class TestGwas(unittest.TestCase):
@classmethod
def setUpClass(self):
#self.snpreader_bed = Bed(currentFolder + "../feature_selection/examples/toydata")
#self.pheno_fn = currentFolder + "../feature_selection/examples/toydata.phe"
self.meh = True
def test_loco(self):
"""
test leave one chromosome out iterator
"""
names = ["a", "b", "a", "c", "b", "c", "b"]
loco = LeaveOneChromosomeOut(names)
expect = [[[1,3,4,5,6],[0,2]],
[[0,2,3,5],[1,4,6]],
[[0,1,2,4,6],[3,5]]]
for i, (train_idx, test_idx) in enumerate(loco):
assert (expect[i][0] == train_idx).all()
assert (expect[i][1] == test_idx).all()
#def xtest_results_identical_with_fastlmmcX(self):
# """
# make sure gwas yields same results as fastlmmC
# """
# os.chdir(r"d:\data\carlk\cachebio\genetics\wtccc\data")
# bed_fn = "filtered/wtcfb"
# pheno_fn = r'pheno\cad.txt'
# logging.info("Loading Bed")
# snp_reader = Bed(bed_fn)
# import fastlmm.pyplink.snpset.PositionRange as PositionRange
# snp_set = PositionRange(0,201)
# logging.info("Intersecting and standardizing")
# G, y, _, _ = load_intersect(snp_reader, pheno_fn, snp_set)
# snp_pos = snp_reader.rs
# idx_sim = range(0, 200)
# idx_test = range(200,201)
# #snp_pos_sim = snp_pos[idx_sim]
# #snp_pos_test = snp_pos[idx_test]
# G_chr1, G_chr2 = G[:,idx_sim], G[:,idx_test]
# delta = 4.0
# REML = False
# #gwas_c = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=REML)
# #gwas_c.run_gwas()
# logging.info("Creating GwasPrototype")
# gwas = GwasPrototype(G_chr1, G_chr2, y, delta, REML=REML)
# logging.info("running GwasPrototype")
# gwas.run_gwas()
# logging.info("finished GwasPrototype")
# #gwas_f = FastGwas(G_chr1, G_chr2, y, delta, findh2=False)
# #gwas_f.run_gwas()
# sorted_snps = snp_pos_test[gwas.p_idx]
## make sure we get p-values right
#np.testing.assert_array_almost_equal(gwas.p_values, gwas_c.p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.p_values, gwas_f.p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.sorted_p_values, gwas_c.sorted_p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.sorted_p_values, gwas_f.sorted_p_values, decimal=3)
def test_results_identical_with_fastlmmc(self):
"""
make sure gwas yields same results as fastlmmC
"""
currentFolder = os.path.dirname(os.path.realpath(__file__))
#prefix = r"C:\Users\chwidmer\Documents\Projects\sandbox\data\test"
#bed_fn = prefix + "/jax_gt.up.filt.M"
#dat_fn = prefix + "/jax_M_expression.1-18.dat"
#pheno_fn = prefix + "/jax_M_expression.19.phe.txt"
bed_fn = os.path.join(currentFolder, "../../feature_selection/examples/toydata")
pheno_fn = os.path.join(currentFolder, "../../feature_selection/examples/toydata.phe")
#prefix = "../../../tests\datasets\mouse"
#bed_fn = os.path.join(prefix, "alldata")
#pheno_fn = os.path.join(prefix, "pheno.txt")
snp_reader = Bed(bed_fn)
G, y, _, _ = load_intersect(snp_reader, pheno_fn)
snp_pos = snp_reader.rs
idx_sim = range(0, 5000)
idx_test = range(5000, 10000)
snp_pos_sim = snp_pos[idx_sim]
snp_pos_test = snp_pos[idx_test]
G_chr1, G_chr2 = G[:,idx_sim], G[:,idx_test]
delta = 1.0
###################################
# REML IN lmm.py is BROKEN!!
# we compare REML=False in lmm.py to fastlmmc
REML = False
gwas_c_reml = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=REML)
gwas_c_reml.run_gwas()
gwas = GwasPrototype(G_chr1, G_chr2, y, delta, REML=False)
gwas.run_gwas()
# check p-values in log-space!
np.testing.assert_array_almost_equal(np.log(gwas.p_values), np.log(gwas_c_reml.p_values), decimal=3)
if False:
import pylab
pylab.plot(np.log(gwas_c_reml.p_values), np.log(gwas_f.p_values_F), "x")
pylab.plot(range(-66,0,1), range(-66,0,1))
pylab.show()
# we compare lmm_cov.py to fastlmmc with REML=False
gwas_c = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=True)
gwas_c.run_gwas()
gwas_f = FastGwas(G_chr1, G_chr2, y, delta, findh2=False)
gwas_f.run_gwas()
np.testing.assert_array_almost_equal(np.log(gwas_c.p_values), np.log(gwas_f.p_values_F), decimal=2)
# additional testing code for the new wrapper functions
# Fix delta
from pysnptools.snpreader import Bed as BedSnpReader
from fastlmm.association.single_snp import single_snp
snpreader = BedSnpReader(bed_fn,count_A1=False)
frame = single_snp(test_snps=snpreader[:,idx_test], pheno=pheno_fn, G0=snpreader[:,idx_sim],h2=1.0/(delta+1.0),leave_out_one_chrom=False,count_A1=False)
sid_list,pvalue_list = frame['SNP'].values,frame['PValue'].values
np.testing.assert_allclose(gwas_f.sorted_p_values_F, pvalue_list, rtol=1e-10)
p_vals_by_genomic_pos = frame.sort_values(["Chr", "ChrPos"])["PValue"].tolist()
np.testing.assert_allclose(gwas_c_reml.p_values, p_vals_by_genomic_pos, rtol=.1)
np.testing.assert_allclose(gwas_c_reml.p_values, gwas_f.p_values_F, rtol=.1)
np.testing.assert_allclose(gwas_f.sorted_p_values_F, gwas_c_reml.sorted_p_values, rtol=.1)
# Search over delta
gwas_c_reml_search = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta=None, REML=True)
gwas_c_reml_search.run_gwas()
frame_search = single_snp(test_snps=snpreader[:,idx_test], pheno=pheno_fn, G0=snpreader[:,idx_sim],h2=None,leave_out_one_chrom=False,count_A1=False)
_,pvalue_list_search = frame_search['SNP'].values,frame_search['PValue'].values
p_vals_by_genomic_pos = frame_search.sort_values(["Chr", "ChrPos"])["PValue"].tolist()
np.testing.assert_allclose(gwas_c_reml_search.p_values, p_vals_by_genomic_pos, rtol=.001)
np.testing.assert_allclose(gwas_c_reml_search.sorted_p_values, pvalue_list_search, rtol=.001)
class GwasPrototype(object):
"""
class to perform genome-wide scan
"""
def __init__(self, train_snps, test_snps, phen, delta=None, cov=None, REML=False, train_pcs=None, mixing=0.0):
"""
set up GWAS object
"""
self.REML = REML
self.train_snps = train_snps
self.test_snps = test_snps
self.phen = phen
if delta is None:
self.delta=None
else:
self.delta = delta * train_snps.shape[1]
self.n_test = test_snps.shape[1]
self.n_ind = len(self.phen)
self.train_pcs = train_pcs
self.mixing = mixing
# add bias if no covariates are used
if cov is None:
self.cov = np.ones((self.n_ind, 1))
else:
self.cov = cov
self.n_cov = self.cov.shape[1]
self.lmm = None
self.res_null = None
self.res_alt = []
self.ll_null = None
self.ll_alt = np.zeros(self.n_test)
self.p_values = np.zeros(self.n_test)
self.sorted_p_values = np.zeros(self.n_test)
# merge covariates and test snps
self.X = np.hstack((self.cov, self.test_snps))
def precompute_UX(self, X):
'''
precompute UX for all snps to be tested
--------------------------------------------------------------------------
Input:
X : [N*D] 2-dimensional array of covariates
--------------------------------------------------------------------------
'''
logging.info("precomputing UX")
self.UX = self.lmm.U.T.dot(X)
self.k = self.lmm.S.shape[0]
self.N = self.lmm.X.shape[0]
if (self.k<self.N):
self.UUX = X - self.lmm.U.dot(self.UX)
logging.info("done.")
def train_null(self):
"""
train model under null hypothesis
"""
logging.info("training null model")
# use LMM
self.lmm = LMM()
self.lmm.setG(self.train_snps, self.train_pcs, a2=self.mixing)
self.lmm.setX(self.cov)
self.lmm.sety(self.phen)
logging.info("finding delta")
if self.delta is None:
result = self.lmm.findH2(REML=self.REML, minH2=0.00001 )
self.delta = 1.0/result['h2']-1.0
# UX = lmm_null.U.dot(test_snps)
self.res_null = self.lmm.nLLeval(delta=self.delta, REML=self.REML)
self.ll_null = -self.res_null["nLL"]
def set_current_UX(self, idx):
"""
set the current UX to pre-trained LMM
"""
si = idx + self.n_cov
self.lmm.X = np.hstack((self.X[:,0:self.n_cov], self.X[:,si:si+1]))
self.lmm.UX = np.hstack((self.UX[:,0:self.n_cov], self.UX[:,si:si+1]))
if (self.k<self.N):
self.lmm.UUX = np.hstack((self.UUX[:,0:self.n_cov], self.UUX[:,si:si+1]))
def train_alt(self):
"""
train alternative model
"""
assert self.lmm != None
self.precompute_UX(self.X)
for idx in xrange(self.n_test):
self.set_current_UX(idx)
res = self.lmm.nLLeval(delta=self.delta, REML=self.REML)
self.res_alt.append(res)
self.ll_alt[idx] = -res["nLL"]
if idx % 1000 == 0:
logging.info("processing snp {0}".format(idx))
def compute_p_values(self):
"""
given trained null and alt models, compute p-values
"""
# from C++ (?)
#real df = rank_beta[ snp ] - ((real)1.0 * rank_beta_0[ snp ]) ;
#pvals[ snp ] = PvalFromLikelihoodRatioTest( LL[ snp ] - LL_0[ snp ], ((real)0.5 * df) );
degrees_of_freedom = 1
assert len(self.res_alt) == self.n_test
for idx in xrange(self.n_test):
test_statistic = self.ll_alt[idx] - self.ll_null
self.p_values[idx] = stats.chi2.sf(2.0 * test_statistic, degrees_of_freedom)
self.p_idx = np.argsort(self.p_values)
self.sorted_p_values = self.p_values[self.p_idx]
def plot_result(self):
"""
plot results
"""
import pylab
pylab.semilogy(self.p_values)
pylab.show()
dummy = [self.res_alt[idx]["nLL"] for idx in xrange(self.n_test)]
pylab.hist(dummy, bins=100)
pylab.title("neg likelihood")
pylab.show()
pylab.hist(self.p_values, bins=100)
pylab.title("p-values")
pylab.show()
def run_gwas(self):
"""
invoke all steps in the right order
"""
self.train_null()
self.train_alt()
self.compute_p_values()
#self.plot_result()
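# Typical use of GwasPrototype (sketch mirroring the call in
# test_results_identical_with_fastlmmc above; G_chr1, G_chr2 and y are the
# standardized training SNPs, test SNPs and phenotype):
# gwas = GwasPrototype(G_chr1, G_chr2, y, delta=1.0, REML=False)
# gwas.run_gwas()
# gwas.sorted_p_values # p-values of the test SNPs, sorted ascending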
class GwasTest(object):
"""
genome-wide scan using FastLmmC
--------------------------------------------------------------------------
Input:
bfile basename for PLINK's binary .bed,.fam and .bin files
pheno name of phenotype file
bfileSim basename for PLINK's binary files for building genetic similarity
sim specifies that genetic similarities are to be read directly from this file
linreg specifies that linear regression will be performed. When this option is used, no genetic similarities should be specified (boolean)
covar optional file containing the covariates
out the name of the output file
optLogdelta if set, delta is not optimized
extract FaSTLMM will only analyze the SNPs explicitly listed in the filename
extractSim FastLMM will only use the SNPs explicitly listed for computing genetic similarity
autoSelect determines the SNPs to be included in the similarity matrix. When this option is used, GWAS is not run. SNPs are written to filename.snps.txt
and statistics to filename.xval.txt
autoSelectCriterionMSE directs AutoSelect to use out-of-sample mean-squared error for the selection criterion. Otherwise out-of-sample log likelihood is used
excludeByPosition excludes the SNP tested and those within this distance from the genetic similarity matrix
excludeByGeneticDistance excludes the SNP tested and those within this distance from the genetic similarity matrix
eigen load the spectral decomposition object from the directory name
eigenOut save the spectral decomposition object to the directory name
maxThreads suggests the level of parallelism to use
simOut specifies that genetic similarities are to be written to this file
topKbyLinReg directs AutoSelect to use only the top <int> SNPs, as determined by linear regression, while selecting SNPs
for more information, we refer to the user-manual of fast-lmm
# run linear mixed model
run_fastlmmc.run(bfile=self.bedFile,pheno=self.phenoFile,bfileSim=self.bedFileSim,linreg=self.linreg,
covar=self.covarFile,optLogdelta=self.logdelta,extractSim=self.extractSim,
excludeByPosition=excludeByPosition,excludeByGeneticDistance=excludeByGeneticDistance,
fastlmm_path=self.fastlmmPath,out=self.outFile,extract=self.extract,numJobs=self.numJobs,
thisJob=self.thisJob)
"""
def __init__(self, bed_fn, pheno_fn, snp_idx_sim, snp_idx_test, delta, REML=False, excludeByPosition=None):
"make a call to fastlmm c"
self.extract = "tmp_extract.txt"
self.extractSim = "tmp_extract_sim.txt"
self.write_snp_file(self.extract, snp_idx_test)
self.write_snp_file(self.extractSim, snp_idx_sim)
self.bedFile = bed_fn
self.bedFileSim = bed_fn
self.phenoFile = pheno_fn
self.optLogdelta = np.log(delta) if delta is not None else None
self.REML =REML
currentFolder = os.path.dirname(os.path.realpath(__file__))
self.fastlmm_path = os.path.join(currentFolder,"../Fastlmm_autoselect")
self.out_file = "out.txt"
self.sorted_p_values = None
self.excludeByPosition = excludeByPosition
def write_snp_file(self, out_fn, snp_ids):
"""
write out snps to flat file
"""
with open(out_fn, "w") as f:
for sid in snp_ids:
f.write(str(sid) + "\n")
def run_gwas(self):
"""
"""
# run linear mixed model
run_fastlmmc.run(bfile=self.bedFile, pheno=self.phenoFile, bfileSim=self.bedFileSim,
optLogdelta=self.optLogdelta, extractSim=self.extractSim,
fastlmm_path=self.fastlmm_path, out=self.out_file, extract=self.extract,
REML=self.REML, excludeByPosition=self.excludeByPosition)
self.read_results()
def read_results(self):
"""
read results file
"""
import pandas as pd
table = pd.read_table(self.out_file)
self.sorted_p_values = table["Pvalue"].tolist()
self.sorted_snps = table["SNP"].tolist()
self.p_values = table.sort_values(["Chromosome", "Position"])["Pvalue"].tolist()
def getTestSuite():
"""
set up composite test suite
"""
suite1 = unittest.TestLoader().loadTestsFromTestCase(TestGwas)
return unittest.TestSuite([suite1])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
tkaitchuck/nupic | external/darwin64/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
self._logitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| gpl-3.0 |
ofrei/ldsc | munge_sumstats.py | 1 | 29935 | #!/usr/bin/env python
from __future__ import division
import pandas as pd
import numpy as np
import os
import sys
import traceback
import gzip
import bz2
import argparse
from scipy.stats import chi2
from ldscore import sumstats
from ldsc import MASTHEAD, Logger, sec_to_str
import time
np.seterr(invalid='ignore')
try:
x = pd.DataFrame({'A': [1, 2, 3]})
x.sort_values(by='A')
except AttributeError:
raise ImportError('LDSC requires pandas version >= 0.17.0')
null_values = {
'LOG_ODDS': 0,
'BETA': 0,
'OR': 1,
'Z': 0
}
default_cnames = {
# RS NUMBER
'SNP': 'SNP',
'MARKERNAME': 'SNP',
'SNPID': 'SNP',
'RS': 'SNP',
'RSID': 'SNP',
'RS_NUMBER': 'SNP',
'RS_NUMBERS': 'SNP',
# NUMBER OF STUDIES
'NSTUDY': 'NSTUDY',
'N_STUDY': 'NSTUDY',
'NSTUDIES': 'NSTUDY',
'N_STUDIES': 'NSTUDY',
# P-VALUE
'P': 'P',
'PVALUE': 'P',
'P_VALUE': 'P',
'PVAL': 'P',
'P_VAL': 'P',
'GC_PVALUE': 'P',
# ALLELE 1
'A1': 'A1',
'ALLELE1': 'A1',
'ALLELE_1': 'A1',
'EFFECT_ALLELE': 'A1',
'REFERENCE_ALLELE': 'A1',
'INC_ALLELE': 'A1',
'EA': 'A1',
# ALLELE 2
'A2': 'A2',
'ALLELE2': 'A2',
'ALLELE_2': 'A2',
'OTHER_ALLELE': 'A2',
'NON_EFFECT_ALLELE': 'A2',
'DEC_ALLELE': 'A2',
'NEA': 'A2',
# N
'N': 'N',
'NCASE': 'N_CAS',
'CASES_N': 'N_CAS',
'N_CASE': 'N_CAS',
'N_CASES': 'N_CAS',
'N_CONTROLS': 'N_CON',
'N_CAS': 'N_CAS',
'N_CON': 'N_CON',
'NCONTROL': 'N_CON',
'CONTROLS_N': 'N_CON',
'N_CONTROL': 'N_CON',
'WEIGHT': 'N', # metal does this. possibly risky.
# SIGNED STATISTICS
'ZSCORE': 'Z',
'Z-SCORE': 'Z',
'GC_ZSCORE': 'Z',
'Z': 'Z',
'OR': 'OR',
'B': 'BETA',
'BETA': 'BETA',
'LOG_ODDS': 'LOG_ODDS',
'EFFECTS': 'BETA',
'EFFECT': 'BETA',
'SIGNED_SUMSTAT': 'SIGNED_SUMSTAT',
# INFO
'INFO': 'INFO',
# MAF
'EAF': 'FRQ',
'FRQ': 'FRQ',
'MAF': 'FRQ',
'FRQ_U': 'FRQ',
'F_U': 'FRQ',
}
describe_cname = {
'SNP': 'Variant ID (e.g., rs number)',
'P': 'p-Value',
'A1': 'Allele 1, interpreted as ref allele for signed sumstat.',
'A2': 'Allele 2, interpreted as non-ref allele for signed sumstat.',
'N': 'Sample size',
'N_CAS': 'Number of cases',
'N_CON': 'Number of controls',
'Z': 'Z-score (0 --> no effect; above 0 --> A1 is trait/risk increasing)',
'OR': 'Odds ratio (1 --> no effect; above 1 --> A1 is risk increasing)',
'BETA': '[linear/logistic] regression coefficient (0 --> no effect; above 0 --> A1 is trait/risk increasing)',
'LOG_ODDS': 'Log odds ratio (0 --> no effect; above 0 --> A1 is risk increasing)',
'INFO': 'INFO score (imputation quality; higher --> better imputation)',
'FRQ': 'Allele frequency',
'SIGNED_SUMSTAT': 'Directional summary statistic as specified by --signed-sumstats.',
'NSTUDY': 'Number of studies in which the SNP was genotyped.'
}
def read_header(fh):
    '''Read the first line of a file and return a list with the column names.'''
(openfunc, compression) = get_compression(fh)
return [x.rstrip('\n') for x in openfunc(fh).readline().split()]
def get_cname_map(flag, default, ignore):
'''
Figure out which column names to use.
Priority is
(1) ignore everything in ignore
(2) use everything in flags that is not in ignore
(3) use everything in default that is not in ignore or in flags
    The keys of flag are cleaned. The entries of ignore are not cleaned. The keys of default
are cleaned. But all equality is modulo clean_header().
'''
clean_ignore = [clean_header(x) for x in ignore]
cname_map = {x: flag[x] for x in flag if x not in clean_ignore}
cname_map.update(
{x: default[x] for x in default if x not in clean_ignore + flag.keys()})
return cname_map
def get_compression(fh):
'''
    Read filename suffixes and figure out whether the file is gzipped, bzip2'ed, or not compressed
'''
if fh.endswith('gz'):
compression = 'gzip'
openfunc = gzip.open
elif fh.endswith('bz2'):
compression = 'bz2'
openfunc = bz2.BZ2File
else:
openfunc = open
compression = None
return openfunc, compression
def clean_header(header):
'''
For cleaning file headers.
- convert to uppercase
- replace dashes '-' with underscores '_'
- replace dots '.' (as in R) with underscores '_'
- remove newlines ('\n')
'''
return header.upper().replace('-', '_').replace('.', '_').replace('\n', '')
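# Illustrative sketch (editor's addition, not part of the original ldsc code):
# a raw GWAS header such as 'p.value' is first normalized by clean_header() and
# then resolved to an internal name via the maps above, e.g.
#     clean_header('p.value')                       == 'P_VALUE'
#     default_cnames[clean_header('p.value')]       == 'P'
#     get_cname_map({}, default_cnames, [])['PVAL'] == 'P'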
def filter_pvals(P, log, args):
'''Remove out-of-bounds P-values'''
ii = (P > 0) & (P <= 1)
bad_p = (~ii).sum()
if bad_p > 0:
msg = 'WARNING: {N} SNPs had P outside of (0,1]. The P column may be mislabeled.'
log.log(msg.format(N=bad_p))
return ii
def filter_info(info, log, args):
'''Remove INFO < args.info_min (default 0.9) and complain about out-of-bounds INFO.'''
if type(info) is pd.Series: # one INFO column
jj = ((info > 2.0) | (info < 0)) & info.notnull()
ii = info >= args.info_min
elif type(info) is pd.DataFrame: # several INFO columns
jj = (((info > 2.0) & info.notnull()).any(axis=1) | (
(info < 0) & info.notnull()).any(axis=1))
ii = (info.sum(axis=1) >= args.info_min * (len(info.columns)))
else:
raise ValueError('Expected pd.DataFrame or pd.Series.')
bad_info = jj.sum()
if bad_info > 0:
        msg = 'WARNING: {N} SNPs had INFO outside of [0,2]. The INFO column may be mislabeled.'
log.log(msg.format(N=bad_info))
return ii
def filter_frq(frq, log, args):
'''
Filter on MAF. Remove MAF < args.maf_min and out-of-bounds MAF.
'''
jj = (frq < 0) | (frq > 1)
bad_frq = jj.sum()
if bad_frq > 0:
msg = 'WARNING: {N} SNPs had FRQ outside of [0,1]. The FRQ column may be mislabeled.'
log.log(msg.format(N=bad_frq))
frq = np.minimum(frq, 1 - frq)
ii = frq > args.maf_min
return ii & ~jj
def filter_alleles(a):
'''Remove alleles that do not describe strand-unambiguous SNPs'''
return a.isin(sumstats.VALID_SNPS)
def parse_dat(dat_gen, convert_colname, merge_alleles, log, args):
'''Parse and filter a sumstats file chunk-wise'''
tot_snps = 0
dat_list = []
msg = 'Reading sumstats from {F} into memory {N} SNPs at a time.'
log.log(msg.format(F=args.sumstats, N=int(args.chunksize)))
drops = {'NA': 0, 'P': 0, 'INFO': 0,
'FRQ': 0, 'A': 0, 'SNP': 0, 'MERGE': 0}
for block_num, dat in enumerate(dat_gen):
sys.stdout.write('.')
tot_snps += len(dat)
old = len(dat)
dat = dat.dropna(axis=0, how="any", subset=filter(
lambda x: x != 'INFO', dat.columns)).reset_index(drop=True)
drops['NA'] += old - len(dat)
dat.columns = map(lambda x: convert_colname[x], dat.columns)
ii = np.array([True for i in xrange(len(dat))])
if args.merge_alleles:
old = ii.sum()
ii = dat.SNP.isin(merge_alleles.SNP)
drops['MERGE'] += old - ii.sum()
if ii.sum() == 0:
continue
dat = dat[ii].reset_index(drop=True)
ii = np.array([True for i in xrange(len(dat))])
if 'INFO' in dat.columns:
old = ii.sum()
ii &= filter_info(dat['INFO'], log, args)
new = ii.sum()
drops['INFO'] += old - new
old = new
if 'FRQ' in dat.columns:
old = ii.sum()
ii &= filter_frq(dat['FRQ'], log, args)
new = ii.sum()
drops['FRQ'] += old - new
old = new
old = ii.sum()
if args.keep_maf:
dat.drop(
[x for x in ['INFO'] if x in dat.columns], inplace=True, axis=1)
else:
dat.drop(
[x for x in ['INFO', 'FRQ'] if x in dat.columns], inplace=True, axis=1)
ii &= filter_pvals(dat.P, log, args)
new = ii.sum()
drops['P'] += old - new
old = new
if not args.no_alleles:
dat.A1 = dat.A1.str.upper()
dat.A2 = dat.A2.str.upper()
ii &= filter_alleles(dat.A1 + dat.A2)
new = ii.sum()
drops['A'] += old - new
old = new
if ii.sum() == 0:
continue
dat_list.append(dat[ii].reset_index(drop=True))
sys.stdout.write(' done\n')
dat = pd.concat(dat_list, axis=0).reset_index(drop=True)
msg = 'Read {N} SNPs from --sumstats file.\n'.format(N=tot_snps)
if args.merge_alleles:
msg += 'Removed {N} SNPs not in --merge-alleles.\n'.format(
N=drops['MERGE'])
msg += 'Removed {N} SNPs with missing values.\n'.format(N=drops['NA'])
msg += 'Removed {N} SNPs with INFO <= {I}.\n'.format(
N=drops['INFO'], I=args.info_min)
msg += 'Removed {N} SNPs with MAF <= {M}.\n'.format(
N=drops['FRQ'], M=args.maf_min)
msg += 'Removed {N} SNPs with out-of-bounds p-values.\n'.format(
N=drops['P'])
msg += 'Removed {N} variants that were not SNPs or were strand-ambiguous.\n'.format(
N=drops['A'])
msg += '{N} SNPs remain.'.format(N=len(dat))
log.log(msg)
return dat
def process_n(dat, args, log):
    '''Determine sample size from --N* flags or N* columns. Filter out low-N SNPs.'''
if all(i in dat.columns for i in ['N_CAS', 'N_CON']):
N = dat.N_CAS + dat.N_CON
P = dat.N_CAS / N
dat['N'] = N * P / P[N == N.max()].mean()
dat.drop(['N_CAS', 'N_CON'], inplace=True, axis=1)
# NB no filtering on N done here -- that is done in the next code block
if 'N' in dat.columns:
n_min = args.n_min if args.n_min else dat.N.quantile(0.9) / 1.5
old = len(dat)
dat = dat[dat.N >= n_min].reset_index(drop=True)
new = len(dat)
log.log('Removed {M} SNPs with N < {MIN} ({N} SNPs remain).'.format(
M=old - new, N=new, MIN=n_min))
elif 'NSTUDY' in dat.columns and 'N' not in dat.columns:
nstudy_min = args.nstudy_min if args.nstudy_min else dat.NSTUDY.max()
old = len(dat)
dat = dat[dat.NSTUDY >= nstudy_min].drop(
['NSTUDY'], axis=1).reset_index(drop=True)
new = len(dat)
log.log('Removed {M} SNPs with NSTUDY < {MIN} ({N} SNPs remain).'.format(
M=old - new, N=new, MIN=nstudy_min))
if 'N' not in dat.columns:
if args.N:
dat['N'] = args.N
log.log('Using N = {N}'.format(N=args.N))
elif args.N_cas and args.N_con:
dat['N'] = args.N_cas + args.N_con
if args.daner is None:
msg = 'Using N_cas = {N1}; N_con = {N2}'
log.log(msg.format(N1=args.N_cas, N2=args.N_con))
else:
raise ValueError('Cannot determine N. This message indicates a bug.\n'
'N should have been checked earlier in the program.')
return dat
def p_to_z(P, N):
    '''Convert a P-value to an (unsigned) Z-score.'''
return np.sqrt(chi2.isf(P, 1))
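# Worked example (editor's addition): p_to_z returns the unsigned Z-score implied
# by a two-sided P-value, e.g. p_to_z(0.05, N) == sqrt(chi2.isf(0.05, 1)) ~= 1.96;
# the sign is applied later from the SIGNED_SUMSTAT column.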
def check_median(x, expected_median, tolerance, name):
'''Check that median(x) is within tolerance of expected_median.'''
m = np.median(x)
if np.abs(m - expected_median) > tolerance:
msg = 'WARNING: median value of {F} is {V} (should be close to {M}). This column may be mislabeled.'
raise ValueError(msg.format(F=name, M=expected_median, V=round(m, 2)))
else:
msg = 'Median value of {F} was {C}, which seems sensible.'.format(
C=m, F=name)
return msg
def parse_flag_cnames(log, args):
'''
Parse flags that specify how to interpret nonstandard column names.
flag_cnames is a dict that maps (cleaned) arguments to internal column names
'''
cname_options = [
[args.nstudy, 'NSTUDY', '--nstudy'],
[args.snp, 'SNP', '--snp'],
[args.N_col, 'N', '--N'],
[args.N_cas_col, 'N_CAS', '--N-cas-col'],
[args.N_con_col, 'N_CON', '--N-con-col'],
[args.a1, 'A1', '--a1'],
[args.a2, 'A2', '--a2'],
[args.p, 'P', '--P'],
        [args.frq, 'FRQ', '--frq'],
[args.info, 'INFO', '--info']
]
flag_cnames = {clean_header(x[0]): x[1]
for x in cname_options if x[0] is not None}
if args.info_list:
try:
flag_cnames.update(
{clean_header(x): 'INFO' for x in args.info_list.split(',')})
except ValueError:
log.log(
'The argument to --info-list should be a comma-separated list of column names.')
raise
null_value = None
if args.signed_sumstats:
try:
cname, null_value = args.signed_sumstats.split(',')
null_value = float(null_value)
flag_cnames[clean_header(cname)] = 'SIGNED_SUMSTAT'
except ValueError:
log.log(
'The argument to --signed-sumstats should be column header comma number.')
raise
return [flag_cnames, null_value]
def allele_merge(dat, alleles, log):
'''
    WARNING: dat now contains a bunch of NA's.
    Note: dat now has the same SNPs in the same order as --merge-alleles.
'''
dat = pd.merge(
alleles, dat, how='left', on='SNP', sort=False).reset_index(drop=True)
ii = dat.A1.notnull()
a1234 = dat.A1[ii] + dat.A2[ii] + dat.MA[ii]
match = a1234.apply(lambda y: y in sumstats.MATCH_ALLELES)
jj = pd.Series(np.zeros(len(dat), dtype=bool))
jj[ii] = match
old = ii.sum()
n_mismatch = (~match).sum()
if n_mismatch < old:
log.log('Removed {M} SNPs whose alleles did not match --merge-alleles ({N} SNPs remain).'.format(M=n_mismatch,
N=old - n_mismatch))
else:
raise ValueError(
'All SNPs have alleles that do not match --merge-alleles.')
dat.loc[~jj, [i for i in dat.columns if i != 'SNP']] = float('nan')
dat.drop(['MA'], axis=1, inplace=True)
return dat
parser = argparse.ArgumentParser()
parser.add_argument('--sumstats', default=None, type=str,
help="Input filename.")
parser.add_argument('--N', default=None, type=float,
help="Sample size If this option is not set, will try to infer the sample "
"size from the input file. If the input file contains a sample size "
"column, and this flag is set, the argument to this flag has priority.")
parser.add_argument('--N-cas', default=None, type=float,
help="Number of cases. If this option is not set, will try to infer the number "
"of cases from the input file. If the input file contains a number of cases "
"column, and this flag is set, the argument to this flag has priority.")
parser.add_argument('--N-con', default=None, type=float,
help="Number of controls. If this option is not set, will try to infer the number "
"of controls from the input file. If the input file contains a number of controls "
"column, and this flag is set, the argument to this flag has priority.")
parser.add_argument('--out', default=None, type=str,
help="Output filename prefix.")
parser.add_argument('--info-min', default=0.9, type=float,
help="Minimum INFO score.")
parser.add_argument('--maf-min', default=0.01, type=float,
help="Minimum MAF.")
parser.add_argument('--daner', default=False, action='store_true',
help="Use this flag to parse Stephan Ripke's daner* file format.")
parser.add_argument('--daner-n', default=False, action='store_true',
help="Use this flag to parse more recent daner* formatted files, which "
"include sample size column 'Nca' and 'Nco'.")
parser.add_argument('--no-alleles', default=False, action="store_true",
help="Don't require alleles. Useful if only unsigned summary statistics are available "
"and the goal is h2 / partitioned h2 estimation rather than rg estimation.")
parser.add_argument('--merge-alleles', default=None, type=str,
help="Same as --merge, except the file should have three columns: SNP, A1, A2, "
"and all alleles will be matched to the --merge-alleles file alleles.")
parser.add_argument('--n-min', default=None, type=float,
                    help='Minimum N (sample size). Default is (90th percentile N) / 1.5.')
parser.add_argument('--chunksize', default=5e6, type=int,
help='Chunksize.')
# optional args to specify column names
parser.add_argument('--snp', default=None, type=str,
help='Name of SNP column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--N-col', default=None, type=str,
help='Name of N column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--N-cas-col', default=None, type=str,
                    help='Name of N_CAS column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--N-con-col', default=None, type=str,
                    help='Name of N_CON column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--a1', default=None, type=str,
help='Name of A1 column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--a2', default=None, type=str,
help='Name of A2 column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--p', default=None, type=str,
help='Name of p-value column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--frq', default=None, type=str,
help='Name of FRQ or MAF column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--signed-sumstats', default=None, type=str,
help='Name of signed sumstat column, comma null value (e.g., Z,0 or OR,1). NB: case insensitive.')
parser.add_argument('--info', default=None, type=str,
help='Name of INFO column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--info-list', default=None, type=str,
help='Comma-separated list of INFO columns. Will filter on the mean. NB: case insensitive.')
parser.add_argument('--nstudy', default=None, type=str,
help='Name of NSTUDY column (if not a name that ldsc understands). NB: case insensitive.')
parser.add_argument('--nstudy-min', default=None, type=float,
help='Minimum # of studies. Default is to remove everything below the max, unless there is an N column,'
' in which case do nothing.')
parser.add_argument('--ignore', default=None, type=str,
help='Comma-separated list of column names to ignore.')
parser.add_argument('--a1-inc', default=False, action='store_true',
help='A1 is the increasing allele.')
parser.add_argument('--keep-maf', default=False, action='store_true',
help='Keep the MAF column (if one exists).')
# set p = False for testing in order to prevent printing
def munge_sumstats(args, p=True):
if args.out is None:
raise ValueError('The --out flag is required.')
START_TIME = time.time()
log = Logger(args.out + '.log')
try:
if args.sumstats is None:
raise ValueError('The --sumstats flag is required.')
if args.no_alleles and args.merge_alleles:
raise ValueError(
'--no-alleles and --merge-alleles are not compatible.')
if args.daner and args.daner_n:
raise ValueError('--daner and --daner-n are not compatible. Use --daner for sample ' +
'size from FRQ_A/FRQ_U headers, use --daner-n for values from Nca/Nco columns')
if p:
defaults = vars(parser.parse_args(''))
opts = vars(args)
non_defaults = [x for x in opts.keys() if opts[x] != defaults[x]]
header = MASTHEAD
header += "Call: \n"
header += './munge_sumstats.py \\\n'
options = ['--'+x.replace('_','-')+' '+str(opts[x])+' \\' for x in non_defaults]
header += '\n'.join(options).replace('True','').replace('False','')
header = header[0:-1]+'\n'
log.log(header)
file_cnames = read_header(args.sumstats) # note keys not cleaned
flag_cnames, signed_sumstat_null = parse_flag_cnames(log, args)
if args.ignore:
ignore_cnames = [clean_header(x) for x in args.ignore.split(',')]
else:
ignore_cnames = []
# remove LOG_ODDS, BETA, Z, OR from the default list
if args.signed_sumstats is not None or args.a1_inc:
mod_default_cnames = {x: default_cnames[
x] for x in default_cnames if default_cnames[x] not in null_values}
else:
mod_default_cnames = default_cnames
cname_map = get_cname_map(
flag_cnames, mod_default_cnames, ignore_cnames)
if args.daner:
frq_u = filter(lambda x: x.startswith('FRQ_U_'), file_cnames)[0]
frq_a = filter(lambda x: x.startswith('FRQ_A_'), file_cnames)[0]
N_cas = float(frq_a[6:])
N_con = float(frq_u[6:])
log.log(
'Inferred that N_cas = {N1}, N_con = {N2} from the FRQ_[A/U] columns.'.format(N1=N_cas, N2=N_con))
args.N_cas = N_cas
args.N_con = N_con
# drop any N, N_cas, N_con or FRQ columns
for c in ['N', 'N_CAS', 'N_CON', 'FRQ']:
                for d in [x for x in cname_map if cname_map[x] == c]:
del cname_map[d]
cname_map[frq_u] = 'FRQ'
if args.daner_n:
frq_u = filter(lambda x: x.startswith('FRQ_U_'), file_cnames)[0]
cname_map[frq_u] = 'FRQ'
try:
dan_cas = clean_header(file_cnames[file_cnames.index('Nca')])
except ValueError:
raise ValueError('Could not find Nca column expected for daner-n format')
try:
dan_con = clean_header(file_cnames[file_cnames.index('Nco')])
except ValueError:
raise ValueError('Could not find Nco column expected for daner-n format')
cname_map[dan_cas] = 'N_CAS'
cname_map[dan_con] = 'N_CON'
cname_translation = {x: cname_map[clean_header(x)] for x in file_cnames if
clean_header(x) in cname_map} # note keys not cleaned
cname_description = {
x: describe_cname[cname_translation[x]] for x in cname_translation}
if args.signed_sumstats is None and not args.a1_inc:
sign_cnames = [
x for x in cname_translation if cname_translation[x] in null_values]
if len(sign_cnames) > 1:
raise ValueError(
'Too many signed sumstat columns. Specify which to ignore with the --ignore flag.')
if len(sign_cnames) == 0:
raise ValueError(
'Could not find a signed summary statistic column.')
sign_cname = sign_cnames[0]
signed_sumstat_null = null_values[cname_translation[sign_cname]]
cname_translation[sign_cname] = 'SIGNED_SUMSTAT'
else:
            sign_cname = 'SIGNED_SUMSTAT'
# check that we have all the columns we need
if not args.a1_inc:
req_cols = ['SNP', 'P', 'SIGNED_SUMSTAT']
else:
req_cols = ['SNP', 'P']
for c in req_cols:
if c not in cname_translation.values():
raise ValueError('Could not find {C} column.'.format(C=c))
# check aren't any duplicated column names in mapping
for field in cname_translation:
numk = file_cnames.count(field)
if numk > 1:
raise ValueError('Found {num} columns named {C}'.format(C=field,num=str(numk)))
# check multiple different column names don't map to same data field
for head in cname_translation.values():
numc = cname_translation.values().count(head)
if numc > 1:
raise ValueError('Found {num} different {C} columns'.format(C=head,num=str(numc)))
if (not args.N) and (not (args.N_cas and args.N_con)) and ('N' not in cname_translation.values()) and\
(any(x not in cname_translation.values() for x in ['N_CAS', 'N_CON'])):
raise ValueError('Could not determine N.')
if ('N' in cname_translation.values() or all(x in cname_translation.values() for x in ['N_CAS', 'N_CON']))\
and 'NSTUDY' in cname_translation.values():
nstudy = [
x for x in cname_translation if cname_translation[x] == 'NSTUDY']
for x in nstudy:
del cname_translation[x]
if not args.no_alleles and not all(x in cname_translation.values() for x in ['A1', 'A2']):
raise ValueError('Could not find A1/A2 columns.')
log.log('Interpreting column names as follows:')
log.log('\n'.join([x + ':\t' + cname_description[x]
for x in cname_description]) + '\n')
if args.merge_alleles:
log.log(
'Reading list of SNPs for allele merge from {F}'.format(F=args.merge_alleles))
(openfunc, compression) = get_compression(args.merge_alleles)
merge_alleles = pd.read_csv(args.merge_alleles, compression=compression, header=0,
delim_whitespace=True, na_values='.')
if any(x not in merge_alleles.columns for x in ["SNP", "A1", "A2"]):
raise ValueError(
'--merge-alleles must have columns SNP, A1, A2.')
log.log(
'Read {N} SNPs for allele merge.'.format(N=len(merge_alleles)))
merge_alleles['MA'] = (
merge_alleles.A1 + merge_alleles.A2).apply(lambda y: y.upper())
merge_alleles.drop(
[x for x in merge_alleles.columns if x not in ['SNP', 'MA']], axis=1, inplace=True)
else:
merge_alleles = None
(openfunc, compression) = get_compression(args.sumstats)
# figure out which columns are going to involve sign information, so we can ensure
# they're read as floats
signed_sumstat_cols = [k for k,v in cname_translation.items() if v=='SIGNED_SUMSTAT']
dat_gen = pd.read_csv(args.sumstats, delim_whitespace=True, header=0,
compression=compression, usecols=cname_translation.keys(),
na_values=['.', 'NA'], iterator=True, chunksize=args.chunksize,
dtype={c:np.float64 for c in signed_sumstat_cols})
dat = parse_dat(dat_gen, cname_translation, merge_alleles, log, args)
if len(dat) == 0:
raise ValueError('After applying filters, no SNPs remain.')
old = len(dat)
dat = dat.drop_duplicates(subset='SNP').reset_index(drop=True)
new = len(dat)
log.log('Removed {M} SNPs with duplicated rs numbers ({N} SNPs remain).'.format(
M=old - new, N=new))
# filtering on N cannot be done chunkwise
dat = process_n(dat, args, log)
dat.P = p_to_z(dat.P, dat.N)
dat.rename(columns={'P': 'Z'}, inplace=True)
if not args.a1_inc:
log.log(
check_median(dat.SIGNED_SUMSTAT, signed_sumstat_null, 0.1, sign_cname))
dat.Z *= (-1) ** (dat.SIGNED_SUMSTAT < signed_sumstat_null)
dat.drop('SIGNED_SUMSTAT', inplace=True, axis=1)
# do this last so we don't have to worry about NA values in the rest of
# the program
if args.merge_alleles:
dat = allele_merge(dat, merge_alleles, log)
out_fname = args.out + '.sumstats'
print_colnames = [
c for c in dat.columns if c in ['SNP', 'N', 'Z', 'A1', 'A2']]
if args.keep_maf and 'FRQ' in dat.columns:
print_colnames.append('FRQ')
msg = 'Writing summary statistics for {M} SNPs ({N} with nonmissing beta) to {F}.'
log.log(
msg.format(M=len(dat), F=out_fname + '.gz', N=dat.N.notnull().sum()))
if p:
dat.to_csv(out_fname, sep="\t", index=False,
columns=print_colnames, float_format='%.3f')
os.system('gzip -f {F}'.format(F=out_fname))
log.log('\nMetadata:')
CHISQ = (dat.Z ** 2)
mean_chisq = CHISQ.mean()
log.log('Mean chi^2 = ' + str(round(mean_chisq, 3)))
if mean_chisq < 1.02:
log.log("WARNING: mean chi^2 may be too small.")
log.log('Lambda GC = ' + str(round(CHISQ.median() / 0.4549, 3)))
log.log('Max chi^2 = ' + str(round(CHISQ.max(), 3)))
log.log('{N} Genome-wide significant SNPs (some may have been removed by filtering).'.format(N=(CHISQ
> 29).sum()))
return dat
except Exception:
log.log('\nERROR converting summary statistics:\n')
ex_type, ex, tb = sys.exc_info()
log.log(traceback.format_exc(ex))
raise
finally:
log.log('\nConversion finished at {T}'.format(T=time.ctime()))
log.log('Total time elapsed: {T}'.format(
T=sec_to_str(round(time.time() - START_TIME, 2))))
if __name__ == '__main__':
munge_sumstats(parser.parse_args(), p=True)
| gpl-3.0 |
liberatorqjw/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
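# For intuition (added note): with r = 0.1 and n_features = 3 the AR(1) covariance
# would be toeplitz([1, 0.1, 0.01]), i.e.
# [[1.00, 0.10, 0.01],
#  [0.10, 1.00, 0.10],
#  [0.01, 0.10, 1.00]]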
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
impactlab/oeem-python-api-client | oeem_uploader/tests/test_upload.py | 2 | 2103 | from unittest import TestCase
from oeem_uploader.request import Request
from oeem_uploader.upload import upload_record
import pandas as pd
import os
class TestUpload(TestCase):
@classmethod
def setUp(self):
self.records = [{"project_id": "4848c118-7167-4929-8979-e839a20772db",
"baseline_period_end": "2014-05-27",
"baseline_period_start": "2013-08-27",
"reporting_period_start": "2014-08-25",
"reporting_period_end": "2016-02-11",
"latitude": "41.26996057364848",
"longitude": "-95.97935449486408",
"zipcode": "68111",
"weather_station": "725500",
"predicted_electricity_savings": "-1558.3948758637775",
"predicted_natural_gas_savings": "-43.28523139881372",
"project_cost": "6592.515721671437"
}]
self.request = Request()
def test_upload_project(self):
response = upload_record(self.records[0],self.request)
resp_record_singleton = None
for project in response.json():
if project['project_id'] == self.records[0]['project_id']:
resp_record_singleton = project
self.assertEqual(resp_record_singleton['project_id']
, self.records[0]['project_id'])
def test_upload_and_update_project(self):
pass
class TestUploadWithFixtures(TestCase):
@classmethod
def setUp(self):
self.consumption_records = os.path.join(
os.path.dirname(__file__),
"fixtures/consumption.csv"
)
self.project_records = os.path.join(
os.path.dirname(__file__),
"fixtures/projects.csv"
)
| mit |
bradysalz/DCI-Scores-Bot-v2 | src/show_manager.py | 1 | 1799 | #!/usr/bin/env python3
import time
from typing import Dict
import pandas as pd
from src.web_bot import WebBot
from config import show_file, subreddit
class ShowManager(object):
"""Checks show lists for updates."""
def __init__(self):
self.bot = WebBot(subreddit)
self.shows = None
def _add_show(self, show_info: Dict):
"""Adds a show to the current dataframe."""
df = pd.DataFrame(
{
'Name': [show_info['name']],
'Date': [show_info['competitionDate']],
'GUID': [show_info['competitionGuid']],
},
columns=['Name', 'Date', 'GUID'])
self.shows = self.shows.append(df)
def check_if_new_shows(self, post: bool=True):
"""Checks for shows and posts to reddit if selected.
post: if True, posts new shows to reddit
If the show_file in config doesn't exist, it creates it and tries
to run the script again.
"""
try:
self.shows = pd.read_csv(show_file, names=['Name', 'Date', 'GUID'])
except FileNotFoundError:
# If file doesn't exist, create w/header and redo
with open(show_file, 'w') as f:
f.write('Name,Date,GUID\n')
self.check_if_new_shows()
self.bot.connect()
web_shows = self.bot.get_show_list()
for show in web_shows:
if show['competitionGuid'] not in self.shows.GUID.values:
self._add_show(show)
self.shows.to_csv(show_file, index=False, header=False)
if post:
self.bot.post_thread(show)
print('Added {}'.format(show['name']))
time.sleep(9 * 60) # Reddit API timeout
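# Usage sketch (editor's addition; assumes `config` supplies show_file and subreddit):
#     manager = ShowManager()
#     manager.check_if_new_shows(post=False)  # refresh the local CSV without posting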
| mit |
bdecost/pymisori | test_misori.py | 1 | 1621 | #!/usr/bin/env python
import numpy as np
import h5py
import matplotlib.pyplot as plt
import os
import misori as m
# check misorientation code by comparing to DREAM3D-produced misorientations
dataset_path = os.path.join('data', 'CubicSingleEquiaxedOut.dream3d')
f = h5py.File(dataset_path)
# just presume a DREAM3Dv6 file format with default synthetic dataset name
data = f['DataContainers/SyntheticVolume/CellFeatureData']
MisorientationList = data['MisorientationList']
NumNeighbors = data['NumNeighbors']
NeighborList = data['NeighborList']
AvgQuats = np.array(data['AvgQuats'],dtype=np.float32)
# NeighborList is a linked list stored in a flat array
# use NumNeighbors to traverse the list and build a MisorientationList
ctr = 0;
misorientations = np.zeros(len(MisorientationList),dtype=np.float32)
for i,nneigh in enumerate(NumNeighbors):
qa = AvgQuats[i]
n = nneigh[0]
for j in range(n):
index = ctr + j
qb = AvgQuats[NeighborList[index]]
misorientations[index] = m.misori(qa,qb)
ctr += n
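# Worked example (editor's note): if NumNeighbors were [[2], [1]] and
# NeighborList were [5, 7, 3], grain 0 would be paired with grains 5 and 7
# (flat indices 0-1) and grain 1 with grain 3 (flat index 2); ctr advances by
# each grain's neighbour count so the flat NeighborList stays in sync.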
misorientations = misorientations * (180/np.pi)
s_error = np.square(np.array(MisorientationList) - misorientations)
max_error = np.max(np.sqrt(s_error))
RMS_error = np.sqrt(np.mean(s_error))
print('Max error: {0} degrees'.format(max_error))
print('RMS error: {0} degrees'.format(RMS_error))
plt.hist(misorientations, 25, normed=True, histtype='step',label='calculated')
plt.hist(np.array(MisorientationList), 25, normed=True, histtype='step',label='DREAM3D')
plt.legend(loc='best')
plt.xlabel('Misorientation angle (degrees)')
plt.ylabel('Probability density')
plt.title('Approximate MDF')
plt.show()
| mit |
magne-max/zipline-ja | zipline/data/data_portal.py | 1 | 43044 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import Asset, Future, Equity
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price", "last_traded"
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
    adjustment_reader : SQLiteAdjustmentReader, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
"""
def __init__(self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_trading_session = first_trading_day
_last_sessions = [r.last_available_dt
for r in [equity_daily_reader, future_daily_reader]
if r is not None]
if _last_sessions:
self._last_trading_session = min(_last_sessions)
else:
self._last_trading_session = None
aligned_equity_minute_reader = self._ensure_reader_aligned(
equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(
equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(
future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(
future_daily_reader)
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
)
self._pricing_readers = {
'minute': _dispatch_minute_reader,
'daily': _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(
self._first_trading_day
)
if self._first_trading_day is not None else (None, None)
)
# Store the locs of the first day and first minute
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None else None
)
self._first_trading_minute_loc = (
self.trading_calendar.all_minutes.get_loc(
self._first_trading_minute
)
if self._first_trading_minute is not None else None
)
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == 'minute':
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_trading_session,
self._last_trading_session
)
elif reader.data_frequency == 'session':
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_trading_session,
self._last_trading_session
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
        If there is a trade on the dt, the answer is the dt provided.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and isinstance(asset, Asset))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset`` The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be a int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
if self._is_extra_source(asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
session_label = self.trading_calendar.minute_to_session_label(dt)
if dt < asset.start_date or \
(data_frequency == "daily" and
session_label > asset.end_date) or \
(data_frequency == "minute" and
session_label > asset.end_date):
if field == "volume":
return 0
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
return self._get_daily_data(asset, field, session_label)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, 'minute')
elif field == "price":
return self._get_minute_spot_value(asset, "close", dt,
ffill=True)
else:
return self._get_minute_spot_value(asset, field, dt)
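    # Illustrative call (editor's note): a minute-mode lookup such as
    # portal.get_spot_value(asset, 'price', dt, 'minute') is served by
    # _get_minute_spot_value with ffill=True, i.e. it falls back to the last
    # traded minute and applies any adjustments between that minute and dt.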
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
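    # Worked example (editor's addition; assumes splits are stored as price
    # multipliers, e.g. 0.5 for a 2-for-1 split): for field='close' a 0.5 split
    # and a 0.98 dividend ratio between dt and perspective_dt compound to
    # 0.5 * 0.98 = 0.49, while for field='volume' only the split applies and is
    # inverted, giving a ratio of 1 / 0.5 = 2.0.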
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader('minute')
result = reader.get_value(
asset.sid, dt, column
)
if not ffill:
return result
# we are looking for price, and didn't find one. have to go hunting.
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if last_traded_dt is pd.NaT:
# no last traded dt, bail
if column == 'volume':
return 0
else:
return np.nan
# get the value as of the last traded dt
result = reader.get_value(
asset.sid,
last_traded_dt,
column
)
if np.isnan(result):
return np.nan
if dt == last_traded_dt or dt.date() == last_traded_dt.date():
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, last_traded_dt,
dt, "minute", spot_value=result
)
def _get_daily_data(self, asset, column, dt):
reader = self._get_pricing_reader('daily')
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
val = reader.get_value(asset, dt, column)
if val == -1:
if column == "volume":
return 0
else:
return np.nan
else:
return val
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = reader.get_value(
asset, found_dt, "close"
)
if value != -1:
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= self.trading_calendar.day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.trading_calendar.all_sessions
end_loc = tds.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < self._first_trading_day_loc:
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[
self._first_trading_day_loc + bar_count
].date(),
)
return tds[start_loc:end_loc + 1]
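# Reading of the slice above with hypothetical inputs: for bar_count=3 and
# end_date=2016-01-06 on an NYSE-style calendar, end_loc points at 2016-01-06
# and tds[end_loc - 2:end_loc + 1] returns the three sessions
# [2016-01-04, 2016-01-05, 2016-01-06], i.e. the window ends at and includes
# end_date.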
def _get_history_daily_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_data(
self, assets, days_for_window, end_dt, field_to_use):
ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0
if ends_at_midnight:
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day, use daily data for the whole range.
return self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._daily_aggregator.volumes(
assets, end_dt)
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _handle_history_out_of_bounds(self, bar_count):
suggested_start_day = (
self.trading_calendar.all_minutes[
self._first_trading_minute_loc + bar_count
] + self.trading_calendar.day
).date()
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day,
)
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_for_assets(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS:
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
dt_to_fill = df.index[0]
perspective_dt = df.index[-1]
assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]
for missing_loc in assets_with_leading_nan:
asset = assets[missing_loc]
previous_dt = self.get_last_traded_dt(
asset, dt_to_fill, data_frequency)
if pd.isnull(previous_dt):
continue
previous_value = self.get_adjusted_value(
asset,
field,
previous_dt,
perspective_dt,
data_frequency,
)
df.iloc[0, missing_loc] = previous_value
df.fillna(method='ffill', inplace=True)
for asset in df.columns:
if df.index[-1] >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
series = df[asset]
series[series.index.normalize() > asset.end_date] = np.NaN
return df
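# Hypothetical usage sketch for the public history API above (the portal,
# asset and end_dt names are placeholders, not zipline fixtures):
#   closes = portal.get_history_window([asset], end_dt, bar_count=20,
#                                       frequency='1d', field='price',
#                                       ffill=True)
# The result is a DataFrame indexed by the 20 sessions ending at end_dt's
# session, with one column per asset, adjusted for splits/mergers/dividends
# and forward-filled at the leading edge.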
def _get_minute_window_for_assets(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable of Asset
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._get_minute_window_data(assets, field, minutes_for_window)
def _get_minute_window_data(
self, assets, field, minutes_for_window):
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False)
def _get_daily_window_for_sids(
self, assets, field, days_in_window, extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
assets : iterable of Asset
The assets whose data is desired.
days_in_window : pd.DatetimeIndex
The sessions (as pd.Timestamps) for which data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)))
else:
return_array = np.zeros((bar_count, len(assets)))
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, sids, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
sids : container
Sids for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(int, float)]
List of splits, where each split is a (sid, ratio) tuple.
"""
if self._adjustment_reader is None or not sids:
return {}
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in sids]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
# bars is the number of days desired. we have to translate that
# into the number of minutes we want.
# we get all the minutes for the last (bars - 1) days, then add
# all the minutes so far today. the +2 is to account for ignoring
# today, and the previous day, in doing the math.
session_for_minute = self.trading_calendar.minute_to_session_label(
ending_minute
)
previous_session = self.trading_calendar.previous_session_label(
session_for_minute
)
sessions = self.trading_calendar.sessions_in_range(
self.trading_calendar.sessions_window(previous_session,
-days_count + 2)[0],
previous_session,
)
minutes_count = sum(
len(self.trading_calendar.minutes_for_session(session))
for session in sessions
)
# add the minutes for today
today_open = self.trading_calendar.open_and_close_for_session(
session_for_minute
)[0]
minutes_count += \
((ending_minute - today_open).total_seconds() // 60) + 1
return minutes_count
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset], dt, 2, "1d", "price", ffill=True
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = int(self._get_minute_count_for_transform(
dt, bars
))
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "price", ffill=True
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "volume",
ffill=True
)[asset]
vol_sum = nansum(volume_arr)
try:
ret = nansum(price_arr * volume_arr) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
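# Note on the 'vwap' branch above: it computes the standard volume-weighted
# average price, vwap = nansum(price_i * volume_i) / nansum(volume_i), over the
# trailing window of ``bars`` periods; windows with no traded volume are
# reported as NaN.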
| apache-2.0 |
ygenc/onlineLDA | onlineldavb_new/build/scipy/scipy/misc/common.py | 5 | 10696 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import exp, log, asarray, arange, newaxis, hstack, product, array, \
where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, r_
__all__ = ['logsumexp', 'factorial','factorial2','factorialk','comb',
'central_diff_weights', 'derivative', 'pade', 'lena']
# XXX: the factorial functions could move to scipy.special, and the others
# to numpy perhaps?
def logsumexp(a):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`.
"""
a = asarray(a)
a_max = a.max()
return a_max + log((exp(a-a_max)).sum())
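# Concrete illustration of the max-shift trick above (values chosen only for
# exposition): for a = [1000., 1000.] the naive log(sum(exp(a))) overflows,
# since exp(1000) is inf in float64, whereas the shifted form returns
# 1000 + log(exp(0) + exp(0)) = 1000 + log(2) ~= 1000.6931.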
def factorial(n,exact=0):
"""
The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=0 case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0L
val = 1L
for k in xrange(1,n+1):
val *= k
return val
else:
from scipy import special
n = asarray(n)
sv = special.errprint(0)
vals = special.gamma(n+1)
sv = special.errprint(sv)
return where(n>=0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)!  n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0L
if n <= 0:
return 1L
val = 1L
for k in xrange(n,0,-2):
val *= k
return val
else:
from scipy import special
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=1):
"""
n(!!...!) = multifactorial of order k, i.e. the factorial with only every
k-th value counted.
Parameters
----------
n : int, array_like
Calculate multifactorial. Arrays are only supported with exact
set to False. If n < 0, the return value is 0.
k : int
Order of the multifactorial (k=1 is the ordinary factorial, k=2 the
double factorial).
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of n.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0L
if n<=0:
return 1L
val = 1L
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
def comb(N,k,exact=0):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
exact : int, optional
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, array
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=0 case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0L
val = 1L
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
from scipy import special
k,N = asarray(k), asarray(N)
lgam = special.gammaln
cond = (k <= N) & (N >= 0) & (k >= 0)
sv = special.errprint(0)
vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1))
sv = special.errprint(sv)
return where(cond, vals, 0.0)
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative of order ndiv
assuming equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
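# Sanity check with standard finite-difference values (easy to verify by hand):
# central_diff_weights(3, 1) returns [-0.5, 0.0, 0.5], i.e.
# f'(x) ~= (f(x + dx) - f(x - dx)) / (2*dx), which matches the hard-coded
# array([-1, 0, 1])/2.0 used by `derivative` below for order=3.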
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at point x0.
Given a function, use a central difference formula with spacing `dx` to
compute the n-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which nth derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> def x2(x):
... return x*x
...
>>> derivative(x2, 2)
4.0
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n==1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n==2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""Given Taylor series coefficients in an, return a Pade approximation to
the function as the ratio of two polynomials p / q where the order of q is m.
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
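# Worked example (a standard result reproducible with this routine): for the
# Taylor coefficients of exp(x) truncated at second order, an = [1.0, 1.0, 0.5],
# pade(an, 1) returns p(x) = 1 + 0.5*x and q(x) = 1 - 0.5*x, i.e. the classic
# [1/1] Pade approximant (2 + x) / (2 - x) of the exponential.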
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import cPickle, os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(cPickle.load(f))
f.close()
return lena
| gpl-3.0 |
jpautom/scikit-learn | setup.py | 19 | 11460 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
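# Hypothetical sketch of the consumer side of this flag, paraphrased rather
# than copied from sklearn/__init__.py:
#   try:
#       __SKLEARN_SETUP__
#   except NameError:
#       __SKLEARN_SETUP__ = False
#   if not __SKLEARN_SETUP__:
#       from . import __check_build   # skipped while setup.py is running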
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
from sklearn._build_utils import cythonize
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
cythonize.main(cwd)
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
drefk99/skynet_beeva | python-scripts-analisis/load_classifier.py | 1 | 2863 | #Program to classify tweet texts; takes the JSON file name (without extension) as argument
#Importing necessary libraries
import pickle
import pandas as pd
import numpy as np
import nltk
import json
import sys
import os
#Getting the external arguments and building strings for later use; when running from crontab, use absolute paths
arg=sys.argv
name1=arg[1]+'_analisis.json'
name2=arg[1]+'_resultados.json'
#Loading the classifier and the word features; when running from crontab, use an absolute path
with open('objs.pickle', "rb") as f:
classifier, word_features=pickle.load(f)
#Function to extract features from a document given the word features
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
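#Illustrative example of the helper above; word_features here is hypothetical,
#the real list is loaded from objs.pickle. With word_features = ['bueno', 'malo']
#and document = ['bueno', 'servicio'], extract_features(document) returns
#{'contains(bueno)': True, 'contains(malo)': False}.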
if os.path.isfile(name1) and os.path.isfile(name2):
print('Data has been already analized')
else:
print('Analizing...')
#Loading the file specified in the argument
with open(arg[1]+'.json', 'r') as fp:
data = json.load(fp)
#Declaration of auxiliary variables
load=[]
#Get the text of the json file
for i in range(1,len(data['texts'])):
load.append(data['texts'][str(i)])
#Converting to dataframe for data manipulation
a=pd.DataFrame(load)
a.columns=['text']
#Declaration of more auxiliary variables
aux=[]
p=0
n=0
ne=0
values={}
values.setdefault('text',[])
values.setdefault('sentiment',[])
sentiments={}
sentiments.setdefault('positivos',[])
sentiments.setdefault('negativos',[])
sentiments.setdefault('neutros',[])
#Classifying each tweet
for element in a['text']:
values['text'].append(element)
aux=element.split()
prob1=classifier.prob_classify(extract_features(aux))
dist1=prob1.samples()
prob_pos=prob1.prob("positive")
print(prob_pos)
#The classifier gives the probability that a tweet is positive; thresholds on this probability decide whether the tweet is labelled positive, negative or neutral
if prob_pos < 0.25 and prob_pos > 0.2:
values['sentiment'].append('neutral')
elif prob_pos < 0.2:
values['sentiment'].append('malo')
else:
values['sentiment'].append('bueno')
#Count the positives, negatives and neutrals
for sen in values['sentiment']:
if sen =='bueno':
p=p+1
elif sen == 'malo':
n=n+1
else:
ne=ne+1
#Assign values into dict
sentiments['positivos'].append(p)
sentiments['negativos'].append(n)
sentiments['neutros'].append(ne)
print(values)
#Save the file with the tweets and their labels; when running from crontab, use an absolute path
with open(name1, 'w') as fp1:
json.dump(values, fp1)
#Save the file with the final results; when running from crontab, use an absolute path
with open(name2, 'w') as fp2:
json.dump(sentiments, fp2)
print(p)
print(n)
print(ne)
| mit |