repo_name | path | copies | size | content | license
---|---|---|---|---|---|
phvu/DDF | python/tests/test_ddf.py | 3 | 4006 | """
Created on Jun 22, 2014
@author: nhanitvn
"""
from __future__ import unicode_literals
import unittest
import pandas as pd
import test_base
from ddf import DistributedDataFrame
class TestDDF(test_base.BaseTest):
def testDDFBasic(self):
self.assertEqual(self.airlines.nrow, 31)
self.assertEqual(self.airlines.ncol, 29)
self.assertEqual(len(self.airlines), 31)
def testSummary(self):
df = self.airlines.summary()
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df.columns), self.airlines.ncol)
def testSample(self):
df = self.airlines.head(10)
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df.columns), self.airlines.ncol)
self.assertItemsEqual(df.columns.tolist(), self.airlines.colnames)
self.assertEqual(len(df), 10)
df = self.airlines.sample(10, replacement=False)
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df.columns), self.airlines.ncol)
self.assertItemsEqual(df.columns.tolist(), self.airlines.colnames)
self.assertEqual(len(df), 10)
df = self.airlines.sample(10, replacement=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df.columns), self.airlines.ncol)
self.assertItemsEqual(df.columns.tolist(), self.airlines.colnames)
self.assertEqual(len(df), 10)
def testSample2DDF(self):
ddf2 = self.airlines.sample2ddf(0.5)
self.assertIsInstance(ddf2, DistributedDataFrame)
self.assertItemsEqual(ddf2.colnames, self.airlines.colnames)
def testFiveNums(self):
df = self.airlines.five_nums()
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df), 5)
def testMean(self):
self.assertIsInstance(self.airlines.mean(0), float)
self.assertIsInstance(self.airlines.mean('lateaircraftdelay'), float)
def testVar(self):
tp = self.airlines.var(0)
self.assertIsInstance(tp, tuple)
self.assertEqual(len(tp), 2)
tp = self.airlines.var('lateaircraftdelay')
self.assertIsInstance(tp, tuple)
self.assertEqual(len(tp), 2)
def testDropNA(self):
ddf2 = self.airlines.drop_na(axis='row')
self.assertIsInstance(ddf2, DistributedDataFrame)
self.assertEqual(ddf2.ncol, self.airlines.ncol)
ddf2 = self.airlines.drop_na(axis='column')
self.assertIsInstance(ddf2, DistributedDataFrame)
self.assertEqual(ddf2.nrow, self.airlines.nrow)
with self.assertRaises(ValueError):
self.airlines.drop_na(axis='whatever')
def testJoin(self):
ddf2 = self.airlines.join(self.airlines, self.airlines.colnames[0])
self.assertIsInstance(ddf2, DistributedDataFrame)
def testCorrelation(self):
self.assertIsInstance(self.mtcars.correlation('mpg', 'cyl'), float)
self.assertAlmostEqual(self.mtcars.correlation('mpg', 'mpg'), 1.0)
self.assertRaises(ValueError, self.airlines.correlation, 'Diverted', 'DayOfWeek')
self.assertRaises(ValueError, self.airlines.correlation, 'Diverted', 'stupid_column')
def testAggregate(self):
self.assertIsInstance(self.mtcars.aggregate(['sum(mpg)', 'min(hp)'], ['vs', 'am']), pd.DataFrame)
def testSubset(self):
ddf = self.mtcars['mpg']
self.assertIsInstance(ddf, DistributedDataFrame)
self.assertEqual(ddf.ncol, 1)
self.assertEqual(ddf.nrow, self.mtcars.nrow)
ddf = self.mtcars[0]
self.assertIsInstance(ddf, DistributedDataFrame)
self.assertEqual(ddf.ncol, 1)
self.assertEqual(ddf.nrow, self.mtcars.nrow)
self.assertEqual(ddf.colnames[0], self.mtcars.colnames[0])
ddf = self.mtcars[['mpg', 2]]
self.assertIsInstance(ddf, DistributedDataFrame)
self.assertEqual(ddf.ncol, 2)
self.assertEqual(ddf.nrow, self.mtcars.nrow)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
redarmy30/Eurobot-2017 | old year/RESET-master/CommunicationWithRobot/control4_debug.py | 2 | 7940 | from serial.tools import list_ports
import numpy as np
import serialWrapper
import packetBuilder
import packetParser
import sys
import time
import socket
import math
import random
import traceback
import multiprocessing
import lidar3_debug
import lidarGui
from ctypes import Structure, c_double
import matplotlib.pyplot as plt
# STM32 USB microcontroller ID
#VID = 1155
#PID = 22336
#SNR = '336234893534'
VID = 1155
PID = 22336
SNR = '3677346C3034'
####################
# CONTROL #
####################
def initPTC():
"""Initialize PID, Trajectory, Kinematics"""
# Build packet for sending to robot
packet = packetBuilder.BuildPacket(commands.switchOnPid)
# send packet to port. The sendRequest method will wait for an answer from the
# robot. If you don't need an answer, you can use the 'sendData' method
# (just send data, without waiting for an answer)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'PID controller On'
else:
raise Exception('switchOnPid failed')
packet = packetBuilder.BuildPacket(commands.switchOnTrajectoryRegulator)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'Trajectory regulator ON'
else:
raise Exception('switchOnTrajectoryRegulator failed')
packet = packetBuilder.BuildPacket(commands.switchOnKinematicCalculation)
startT = time.time()
recievedPacket = computerPort.sendRequest(packet.bytearray)
endT = time.time()
print 'Received ', (endT - startT)
if recievedPacket.reply == 'Ok':
print 'Kinematics ON'
else:
raise Exception('switchOnKinematicCalculation failed')
def portNumber():
"""Find all ports, and returns one with defined STM values"""
for port in list_ports.comports():
print port
print port.serial_number, port.pid, port.vid
if (port.serial_number == SNR) and (port.pid == PID) and (port.vid == VID):
return port.name
def globMov():
print '\nInput coordinates and speed type'
# Get user input for movement
x = float(raw_input('X: '))
y = float(raw_input('Y: '))
fi = float(raw_input('angle: '))
speed = int(raw_input('speed type (0 = normal, 1 = stop, 2 = stand): '))
coordinates = [x, y, fi, speed]
#print 'Movement command: ', coordinates
packet = packetBuilder.BuildPacket(commands.addPointToStack, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('add PointToStack failed')
def relMov(x = False, y = False, fi = False, speed = False):
"""Move robot relative to its current coord"""
print '\nInput robot displacement and speed type'
if x == False:
x = float(raw_input('X: '))
if y == False:
y = float(raw_input('Y: '))
if fi == False:
fi = float(raw_input('angle: '))
if speed == False:
speed = int(raw_input('speed type (0 = normal, 1 = stop, 2 = stand): '))
dsplcmnt = [x, y, fi, speed]
#startT = time.time()
oldCoord = getCoord()
newCoord = [oldCoord[0] + dsplcmnt[0], oldCoord[1] + dsplcmnt[1], oldCoord[2]
+ dsplcmnt[2], speed]
#endT = time.time()
#print 'Rel Mov ', (endT - startT)
#print 'Displacement: ', dsplcmnt
#print 'Old Coord: ', oldCoord
#print 'New Coord: ', newCoord
packet = packetBuilder.BuildPacket(commands.addPointToStack, newCoord)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
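# Usage sketch (mirrors the commented test calls at the bottom of this file):
# relMov(0.1, 0.1, 1.571, 2) reads the current coordinates, adds the given
# displacement and queues the new point with speed type 2 ('stand'). Note that
# passing 0.0 still triggers the raw_input prompt, because 0.0 == False in the
# sentinel checks above.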
def setStart(x,y,fi):
coordinates = [x, y, fi]
packet = packetBuilder.BuildPacket(commands.setCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def setCoord():
print '\nSet current robot coordinates'
x = float(raw_input('X: '))
y = float(raw_input('Y: '))
fi = float(raw_input('angle: '))
coordinates = [x, y, fi]
packet = packetBuilder.BuildPacket(commands.setCorectCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def setCCoord(x,y,fi):
sharedcor.value = 1
coordinates = [x, y, fi]
while sharedcor.value == 0:
continue
packet = packetBuilder.BuildPacket(commands.setCorectCoordinates, coordinates)
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
if recievedPacket.reply != 'Ok':
raise Exception('setCoordinates failed')
def getCoord():
"""Return robot current coordinates"""
packet = packetBuilder.BuildPacket(commands.getCurentCoordinates)
#startT = time.time()
with lock:
recievedPacket = computerPort.sendRequest(packet.bytearray)
#endT = time.time()
#print 'GetCoord time: ', (endT - startT)
#print 'Current robot coordinates: ', recievedPacket.reply
return recievedPacket.reply
#packet = packetBuilder.BuildPacket(commands.switchOnKinematicCalculation)
#recievedPacket = computerPort.sendRequest(packet.bytearray)
def correction():
#robot = getCoord()
#print robot
lidar = shared[:]
print 'lidar correction: ', lidar
#diff = [lidar[0]/1000-robot[0],lidar[1]/1000-robot[1],lidar[2]-robot[2]]
setCCoord(lidar[0]/1000, lidar[1]/1000, lidar[2])
def getLidar():
return shared[:]
def detectStop():
old = getCoord()
time.sleep(0.5)
new = getCoord()
# use absolute differences so motion in a negative direction is not mistaken for a stop
suma = sum([abs(new[0]-old[0]), abs(new[1]-old[1]), abs(new[2]-old[2])])
if suma < 0.001:
return True
return False
class Pose(Structure):
_fields_ = [('x', c_double), ('y', c_double), ('fi', c_double)]
def weights():
""" setup an XY plot canvas """
global plidar, wlidar
pipx = [i.x for i in plidar]
pipy = [i.y for i in plidar]
pipfi =[math.degrees(i.fi) for i in plidar]
#we = [(wlidar[i],wlidar[i],wlidar[i]) for i in xrange(100)]
#print we
#print pip
plt.ion()
n, bins, patches = plt.hist((pipx,pipy,pipfi), bins = 50, weights = (wlidar,wlidar,wlidar),
rwidth = 0.9, color = ('b','r','g'))
plt.grid(True)
plt.draw()
raw_input("<Hit Enter To Close>")
plt.close()
################
## START ##
################
port = portNumber()
if port:
port = '/dev/' + port
print 'STM32 found on port %s' % port
else:
print 'No STM32 found. Aborting'
sys.exit()
# COM port initialization
computerPort = serialWrapper.SerialWrapper(port)
# we will choose commands which we want to send from this list
commands = packetBuilder.CommandsList()
# Initialize PID, Trajectory and Kinematics
initPTC()
iteration = 0
lock = multiprocessing.Lock()
shared = multiprocessing.Array('d', [0.0, 0.0, 0.0])
sharedcor = multiprocessing.Value('i', 0)
wlidar = multiprocessing.Array('d', 200)
plidar = multiprocessing.Array(Pose, [(0.0, 0.0, 0.0) for i in xrange(200)])
l = multiprocessing.Process(target=lidar3_debug.localisation, args =(lock,shared,computerPort,commands,plidar,wlidar,sharedcor))
l.start()
g = multiprocessing.Process(target=lidarGui.begin, args =(shared,))
g.start()
setStart(0.152,0.72,0.0)
#figure, lines = init_xy_plot()
comm_list = {1: globMov, 2: relMov, 3: setCoord, 4: getCoord, 5: getLidar,
6: correction, 7: weights}
while True:
try:
iteration += 1
print '\nList of available commands: \n1 Global Movement\n2 Relative Movement'\
'\n3 Set Coordinates\n4 Get Coordinates\n5 Get Lidar\n6 Correction'\
'\n7 Histogram'
command = int(raw_input('Command number: '))
print comm_list[command]()
print 'Command ended'
#print shared[:]
except:
print 'Traceback line in main: '
traceback.print_exc()
sys.exit()
#Communication test
#getCoord()
#print 'Iteration: ', iteration
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'new'
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'new'
#relMov(0.1, 0.1, 1.571, 2)
#print 'new'
#relMov(-0.1, -0.1, -1.571, 2)
#print 'stop' | mit |
ericdill/pyOlog | pyOlog/cli/utils.py | 2 | 2342 | import os
import subprocess
import tempfile
from .. import Attachment
text_message = '''
#
# Please enter the log message using the editor. Lines beginning
# with '#' will be ignored, and an empty message aborts the log
# message from being logged.
#
'''
def save_pyplot_figure(**kwargs):
"""Save a matplotlib figure to an Olog Attachment Object"""
import matplotlib.pyplot as plt
import StringIO
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='pdf', **kwargs)
imgdata.seek(0)
a = [Attachment(imgdata, 'plot.pdf')]
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png', dpi=50,
**kwargs)
imgdata.seek(0)
a.append(Attachment(imgdata, 'thumbnail.png'))
return a
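# Usage sketch (assumes a pyplot figure is currently active):
# plt.plot([1, 2, 3]); attachments = save_pyplot_figure()
# returns a two-element list: the full figure as 'plot.pdf' and a 50 dpi
# 'thumbnail.png', each wrapped in an Attachment ready for an Olog entry.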
def get_screenshot(root=False, itype='png'):
"""Open ImageMagick and get screngrab as png."""
if root:
opts = '-window root'
else:
opts = ''
image = subprocess.Popen('import {0} {1}:-'.format(opts, itype),
shell=True,
stdout=subprocess.PIPE)
img = image.communicate()[0]
return Attachment(img, 'screenshot.' + itype)
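# Usage sketch: get_screenshot(root=True) shells out to ImageMagick's
# `import -window root png:-` and wraps the captured bytes in an Attachment
# named 'screenshot.png'; with root=False the plain `import` call lets the
# user click the window to grab.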
def get_text_from_editor(prepend=None, postpend=None):
"""Open text editor and return text"""
with tempfile.NamedTemporaryFile(suffix='.tmp') as f:
# Write out the file and flush
message = ''
if prepend:
message += '\n\n'
message += prepend
message += '\n'
message += text_message
if postpend:
message += postpend
f.write(message)
f.flush()
# Now open editor to edit file
editor = os.environ.get('EDITOR', 'vim')
subprocess.call([editor, f.name])
# Read file back in
f.seek(0)
text = f.read()
# Strip off any lines that start with whitespace and a '#'
lines = [n for n in text.splitlines() if not n.lstrip().startswith('#')]
text = '\n'.join(lines)
return text
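# Stripping example (a sketch): if the saved editor buffer is
#   "# describe the problem\nBeam dumped at 14:02\n  # internal note\n"
# the returned text is "Beam dumped at 14:02" - every line whose first
# non-whitespace character is '#' is dropped before rejoining with newlines.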
def get_pyplot_fig(self, *args, **kwargs):
"""Save a matplotlib figure as an Attachment"""
import matplotlib.pyplot as plt
import StringIO
imgdata = StringIO.StringIO()
plt.savefig(imgdata, format='png', **kwargs)
imgdata.seek(0)
a = Attachment(imgdata, 'plot.png')
return a
| mit |
jacobmarks/QTop | src/tests/test_488.py | 1 | 1055 | #
# QTop
#
# Copyright (c) 2016 Jacob Marks ([email protected])
#
# This file is part of QTop.
#
# QTop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
import sys
sys.path.append('../')
from src import color_codes, error_models, visualization
sys.path.append('decoders/')
from dsp import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
################## Testing ##################
L, d, p = 9, 2, 0.02
code = color_codes.Color_4_8_8(L,d)
code1 = color_codes.Color_6_6_6(L,d)
# model = error_models.CodeCapacity()
# code = code.CodeCycle(model, p)
# visualization.PlotPlaquette(code, "Before Decoding", 1)
# decoder = DSP_decoder()
# code = decoder(code)
visualization.PlotPrimal(code, "Bound Data", 1)
visualization.PlotPrimal(code1, "Bound Data", 2)
# visualization.PlotPlaquette(code, "After Decoding", 3)
plt.show() | gpl-3.0 |
hamish2014/optTune | optTune/paretoArchives/paretoArchive2D_multi_front.py | 1 | 11121 | """
Multi-objective Pareto front archive, features:
- stores the current Pareto front approximation.
- checks a design's dominance status
- this version is designed for large Pareto sets.
* quick dominance calculation algorithms
Only 2D Pareto front values
"""
import numpy
def dominates(a,b):
"all(a <= b) and any(a < b), no longer used"
return (a[0] <= b[0] and a[1] <= b[1]) and (a[0]<b[0] or a[1]<b[1])
#return (a <= b).all() and (a < b).any() # too slow
#cmp_vals = [cmp(Av,Bv) for Av,Bv in zip(a,b)]
#return 1 not in cmp_vals and -1 in cmp_vals
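# Quick sanity examples (both objectives are minimised):
# dominates([1.0, 2.0], [1.5, 2.0]) -> True (no worse in both, strictly better in f1)
# dominates([1.0, 2.0], [0.5, 3.0]) -> False (worse in f1)
# dominates([1.0, 2.0], [1.0, 2.0]) -> False (equal points do not dominate each other)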
class _paretoArchive_design:
"class containing information about the design."
def __init__(self, fv, xv):
self.fv = fv
self.xv = xv
def __eq__(self, b):
return (self.fv == b.fv).all() and (self.xv == b.xv).all()
class paretoArchive2D_multi_front:
def __init__(self, fronts=5, _offset=0, _frontDominating=None):
"""
make use of a sorted by f1 list for for sorting data.
"""
self.designs = []
self.search_list = []
self.nod_inspected = 0 #nod = number of designs
self.nod_dominance_check_only = 0
self.nod_rejected = 0
# recusively create pareto front layers.
self.frontDominating = _frontDominating
self.offset = _offset
if _offset < fronts-1:
self.frontDominated = paretoArchive2D_multi_front(fronts, _offset+1, self)
else:
self.frontDominated = None
self.N = 0
def list_loc(self, fv_dim1):
"binary search to locate comparison point."
search_list = self.search_list
lb, ub = 0, len(search_list)-1
while ub - lb > 1:
mp = (ub + lb)/2
if search_list[mp] < fv_dim1:
lb = mp #try make sure lb is always less than fv_dim1, and hence non dominated ...
else:
ub = mp
if search_list[ub] == fv_dim1 and search_list[lb] < fv_dim1:
return ub
else: #search_list[lb] == fv_dim1
return lb
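# Behaviour sketch: with self.search_list = [1.0, 2.0, 3.0, 4.0] (f1 values, sorted),
# list_loc(2.5) returns 1 (largest stored f1 strictly below 2.5) and list_loc(3.0)
# returns 2 (the exact match). inspect_design only calls this when
# lower_bound <= fv[0] <= upper_bound, so out-of-range queries do not occur.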
def add_design(self, fv, xv, loc, adjust_bounds):
self.designs.insert(loc, _paretoArchive_design(fv,xv))
self.search_list.insert(loc, fv[0])
if adjust_bounds:
self.lower_bound = min(self.lower_bound, fv[0])
self.upper_bound = max(self.upper_bound, fv[0])
self.N = self.N + 1
def del_design(self, index):
if self.frontDominated <> None:
self.frontDominated.inspect_design(self.designs[index].xv, self.designs[index].fv)
del self.designs[index], self.search_list[index]
self.nod_rejected = self.nod_rejected + 1
self.N = self.N - 1
def inspect_design(self, xv, fv):
"""
inspects designs and returns True if design added, or False if the design in not added,
in other words it returns if the design is non-dominated (True) or domaninated(False)
"""
assert len(fv) == 2
self.nod_inspected = self.nod_inspected + 1
if len(self.designs) == 0:
self.designs = [_paretoArchive_design(fv,xv)]
self.search_list = [fv[0]]
self.lower_bound = fv[0]
self.upper_bound = fv[0]
self.N = 1
return True
if self.lower_bound <= fv[0] and fv[0] <= self.upper_bound:
ind = self.list_loc(fv[0])
if not dominates(self.designs[ind].fv, fv):
if fv[0] > self.designs[ind].fv[0]:
self.add_design(fv,xv,ind+1,False)
check_ind = ind+2
else:
self.add_design(fv,xv,ind,False)
check_ind = ind+1
while check_ind < len(self.designs) and fv[1] < self.designs[check_ind].fv[1]:
self.del_design(check_ind)
if check_ind == len(self.designs):
self.upper_bound = fv[0]
return True
else :
self.nod_rejected = self.nod_rejected + 1
if self.frontDominated <> None:
self.frontDominated.inspect_design(xv, fv)
return False
elif fv[0] < self.lower_bound:
self.add_design(fv,xv,0,True)
while 1 < len(self.designs) and fv[1] <= self.designs[1].fv[1]:
self.del_design(1)
if len(self.designs) == 1:
self.upper_bound = fv[0]
return True
else: # self.upper_bound < fv[0]
if fv[1] < self.designs[-1].fv[1]:
self.add_design(fv,xv,len(self.designs),True)
return True
else:
self.nod_rejected = self.nod_rejected + 1
if self.frontDominated <> None:
self.frontDominated.inspect_design(xv, fv)
return False
def inspect_multiple(self, xvals, fvals):
"inspect multiple designs many fvals and xvals. function helps to reduce expensive grid calculations"
return [self.inspect_design(xv,fv) for xv,fv in zip(xvals,fvals)]
def dominates(self, fv):
"check if front dominates fv"
assert len(fv) == 2
self.nod_dominance_check_only = self.nod_dominance_check_only + 1
if len(self.designs) == 0:
return False
if self.lower_bound <= fv[0] and fv[0] <= self.upper_bound:
ind = self.list_loc(fv[0])
return self.designs[ind].fv[1] < fv[1]
elif fv[0] < self.lower_bound:
return True
else:
return self.designs[-1].fv[1] < fv[1]
def lower_bounds(self):
return numpy.array([self.designs[0].fv[0], self.designs[-1].fv[1]])
def upper_bounds(self):
return numpy.array([self.designs[-1].fv[0], self.designs[0].fv[1]])
def hyper_volume(self, HPV_bound ):
'Calculate the hypervolume bounded between the Pareto front and HPV_bound'
start_ind = 0
#trimming points outside HPV_bounds
while self.designs[start_ind].fv[1] > HPV_bound[1] and start_ind + 1 < len(self.designs)-1 :
start_ind = start_ind + 1
end_ind = len(self.designs)-1
while self.designs[end_ind].fv[0] > HPV_bound[0] and 0 < end_ind :
end_ind = end_ind - 1
HPV = 0.0
for i in range(start_ind, end_ind + 1):
if i == start_ind:
wid = HPV_bound[1] - self.designs[i].fv[1]
else:
wid = self.designs[i-1].fv[1] - self.designs[i].fv[1]
HPV = HPV + wid * ( HPV_bound[0] - self.designs[i].fv[0])
return HPV
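# Worked example (a sketch): for an archived front with fv values (1,3), (2,2), (3,1)
# and HPV_bound = (4,4), nothing is trimmed and the summed slabs are
# 1*(4-1) + 1*(4-2) + 1*(4-3) = 6.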
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
del odict['designs']
odict['design_fv'] = numpy.array([d.fv for d in self.designs])
odict['design_xv'] = numpy.array([d.xv for d in self.designs])
return odict
def __setstate__(self, dict):
dict['designs'] = [ _paretoArchive_design(fv,xv) for fv,xv in zip(dict['design_fv'],dict['design_xv']) ]
self.__dict__.update(dict)
def __eq__(self, b):
'very slow ...'
return all( b_design in self.designs for b_design in b.designs ) and all( d in b.designs for d in self.designs )
def __repr__(self):
return """<lossless 2D pareto Front archive: size: %i, designs inspected: %i, designs rejected: %i, dominance checks %i >""" % (len(self.designs), self.nod_inspected, self.nod_rejected, self.nod_dominance_check_only + self.nod_inspected )
def plot(self, key='go'):
designs = self.designs
xv = [d.fv[0] for d in designs]
yv = [d.fv[1] for d in designs]
import pylab
pylab.plot(xv,yv,key)
def plotAll(self, keysList):
assert type(keysList) == list
import pylab
designs = self.designs
xv = [d.fv[0] for d in designs]
yv = [d.fv[1] for d in designs]
import pylab
pylab.plot(xv,yv,keysList[0],label='Front %i' % self.offset)
if self.frontDominated <> None:
self.frontDominated.plotAll(keysList[1:])
else:
pylab.legend()
def copy(self):
import copy
return copy.deepcopy(self)
def copy_pareto_front_only(self):
r = paretoArchive2D_multi_front(fronts=1)
for d in reversed(self.designs): #reversed for quick adding.
r.inspect_design( d.xv.copy(), d.fv.copy() )
return r
def best_design(self, f1=None, f2=None):
'''
return the best decision/design vector according either f1 or f2 but not both!
if f1, then design selected according to list_loc
'''
assert f1 <> f2
if f1 <> None:
ind = self.list_loc( f1 )
return self.designs[ind].xv.copy()
else:
raise NotImplementedError,"f2 arguement not implemented"
if __name__ == '__main__':
print('Basic tests for the paretoArchive2D_multi_front module')
import pickle, time
from matplotlib import pyplot
plotKeys = ['go','b+','rx','m^']
class TestingPoint:
def __init__(self, label, f1, f2):
self.label = label
self.fv = numpy.array( [f1, f2] )
points = []
def add_exp_curve(a, b, label, f1_pnts=100, f1_min=0, f1_max=1):
for f1 in numpy.linspace(f1_min, f1_max, f1_pnts):
points.append( TestingPoint( label, f1, numpy.exp(a*f1 +b) ) )
add_exp_curve( -1, 0 , 0, 100)
add_exp_curve( -2, 0.2, 1, 90)
add_exp_curve( -3, 0.9, 2, 80)
add_exp_curve( -4, 1.0, 3, 120)
pyplot.subplot(1,2,1)
for p in points:
pyplot.plot([p.fv[0]], [p.fv[1]], plotKeys[p.label])
pyplot.title('sample points')
paretoArchiveClass = paretoArchive2D_multi_front
paretoArchive = paretoArchiveClass(fronts=3)
for p in points:
paretoArchive.inspect_design(numpy.array([p.label]), p.fv )
print(' %i of %i designs found to be non-dominated.' % (paretoArchive.N, len(points)))
#pickling test & auditing
assert paretoArchive == pickle.loads(pickle.dumps(paretoArchive))
no_reals = 0
paretoArchiveC = paretoArchive
while paretoArchiveC <> None:
no_reals = no_reals + sum([ 1 + 2 for d in paretoArchiveC.designs ])
paretoArchiveC = paretoArchiveC.frontDominated
pickle_s_full = pickle.dumps(paretoArchive, protocol=1)
template = '%20s : no reals bytes %8i, no bytes pickle str %8i, efficiency %4.3f'
print(template % ('full pickle',no_reals*8,len(pickle_s_full),
1.0*no_reals*8/len(pickle_s_full)))
no_reals_pf = sum([ 1 + 2 for d in paretoArchive.designs ])
pickle_pf_only = pickle.dumps(paretoArchive.copy_pareto_front_only(), protocol=1)
print(template % ('pickle pf_only',no_reals_pf*8,len(pickle_pf_only),
1.0*no_reals_pf*8/len(pickle_pf_only)))
print(paretoArchive)
pyplot.subplot(1,2,2)
paretoArchive.plotAll(['rx','go','b^']*2)
pyplot.title('Fronts')
pyplot.show()
| gpl-3.0 |
terna/SLAPP3 | 6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/production/graphicDisplayGlobalVarAndFunctions.py | 1 | 6838 | # global variables and functions for graphic display management
# to be imported with
#import graphicDisplayGlobalVarAndFunctions as gvf
# useful links
#labels and colors in networkX
# https://networkx.github.io/documentation/latest/examples/drawing/labels_and_colors.html
# look also at
# https://www.wakari.io/sharing/bundle/nvikram/Basics%20of%20Networkx
# Matplotlib colors
# http://matplotlib.org/api/colors_api.html
# html colors
# http://www.w3schools.com/html/html_colornames.asp
# in this module the try/except structures are not controlled for debug;
# these try/except constructs, indeed, are not intended to control user errors,
# but a regular flow of inputs
import networkx as nx
import matplotlib.pyplot as plt
import commonVar as common
# the base: creating the graph (and copying its address in a common variable
# to allow direct interaction with the graph when the program is finished,
# as the common space is also imported in the main program)
def createGraph():
global colors, pos
common.g = nx.DiGraph() # directed graph, instead of nx.Graph()
colors = {}
pos = {}
common.g_labels = {}
common.g_edge_labels = {} # copy the address of the labels of the edges
# searching tools
def findNodesFromSector(sector):
nodeList = []
for aNode in common.g.nodes():
if common.g.nodes[aNode]['sector'] == sector:
nodeList.append(aNode)
return nodeList
def createEdge(a, b):
# implicitly directed, due to the use of DiGraph
if a is None or b is None:
print("Internal error, attempt to create an edge with a node defined None")
exit(0)
try:
common.g[a][b]['weight'] = 1 + common.g[a][b]['weight']
except BaseException:
common.g.add_edge(a, b)
common.g[a][b]['weight'] = 1
if a != b:
# verifying the presence of the edge in the other direction
try:
otherW = common.g[b][a]['weight']
common.g_edge_labels[a, b] = "w.s %d and %d" % (
common.g[a][b]['weight'], otherW)
common.g_edge_labels[b, a] = ""
except BaseException:
common.g_edge_labels[a, b] = "w. %d" % common.g[a][b]['weight']
if a == b:
common.g_edge_labels[a, b] = ""
common.g[a][b]['pseudoLabel'] = "auto link w. %d" \
% common.g[a][b]['weight']
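# Behaviour sketch: calling createEdge(n1, n2) twice leaves one directed edge
# n1->n2 with weight 2 and label "w. 2"; if the opposite edge n2->n1 also exists,
# the n1->n2 label reports both weights ("w.s ... and ...") and the n2->n1 label
# is blanked; a self-link n1->n1 gets an empty edge label plus an
# "auto link w. N" pseudoLabel.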
# using networkX and matplotlib case
def closeNetworkXdisplay():
plt.close()
def openClearNetworkXdisplay():
if common.graphicStatus == "PythonViaTerminal":
plt.ion()
# plt.clf()
def clearNetworkXdisplay():
plt.clf()
def getGraph():
try:
return common.g
except BaseException:
return 0
def pruneEdges():
if not common.prune:
return
common.prune = False
print("New threshold to prune: < %d" % common.pruneThreshold)
#edges=common.g.edges() modified with NetworkX 2.0
edges=[]
for anE in common.g.edges():
edges.append(anE)
print("weights of the links")
for anEdge in edges:
u = anEdge[0].number
uu = anEdge[0]
v = anEdge[1].number
vv = anEdge[1]
w = common.g[anEdge[0]][anEdge[1]]["weight"]
print(u, v, w)
if w < common.pruneThreshold:
# managing labels, related to createEdge phase above
common.g_edge_labels.pop((uu, vv))
try:
common.g_edge_labels[vv,
uu] = "w. %d" % common.g[vv][uu]['weight']
except BaseException:
pass
if uu == vv:
common.g[uu][uu]['pseudoLabel'] = ""
common.g_labels[uu] = str(uu.number) + " (" +\
str(len(uu.recipeWaitingList)) + ")"
# removing
common.g.remove_edge(uu, vv)
def drawGraph():
# directed, due to the use of DiGraph
# draw_networkx is well documented at
# https://networkx.github.io/documentation/latest/reference/
# generated/networkx.drawing.nx_pylab.draw_networkx.html
# nx.draw_networkx(agentGraph, font_size=10,node_size=500, \
clearNetworkXdisplay()
pruneEdges()
nx.draw_networkx(common.g, pos, font_size=10, node_size=500,
node_color=list(colors.values()),
labels=common.g_labels)
nx.draw_networkx_edge_labels(
common.g,
pos,
edge_labels=common.g_edge_labels,
font_size=9)
# plt.draw()
plt.show() # used by %Matplotlib inline [without ion()]; not conflicting
# with ion()
if common.graphicStatus == "PythonViaTerminal":
plt.pause(0.01)
# to show the sequence of the shown images in absence of pauses
# print agentGraph.nodes(data=True)
# print agentGraph.edges(data=True)
# print labels
# print edge_labels
# print a, agentGraph.node[a].keys(), agentGraph.node[a].values(),\
# agentGraph.node[a]['sector']
# adjacency
print()
for i in range(len(common.orderedListOfNodes)):
print("%d " % common.orderedListOfNodes[i].number, end=' ')
print()
# print "drawGraph verification of existing nodes",common.g.nodes()
if common.g.nodes() != []:
A = nx.adjacency_matrix(common.g, nodelist=common.orderedListOfNodes,
weight='weight')
# print A # as sparse matrix, defaul from nx 1.9.1
print(A.todense()) # as a regular matrix
else:
print("No nodes, impossible to create the adjacency_matrix")
print()
# neighbors
for aNode in common.g.nodes():
print(aNode.number, [node.number
for node in nx.neighbors(common.g, aNode)])
# betweenness_centrality
# Betweenness centrality of a node v is the sum of the fraction of all-pairs
# shortest paths that pass through v
# http://networkx.lanl.gov/reference/generated/
# networkx.algorithms.centrality.betweenness_centrality.html
print()
print("betweenness_centrality")
common.btwn = nx.betweenness_centrality(
common.g, normalized=False, weight='weight')
# print btw
for i in range(len(common.orderedListOfNodes)):
print(common.orderedListOfNodes[i].number,
common.btwn[common.orderedListOfNodes[i]])
# closeness_centrality
# Closeness centrality at a node is 1/average distance to all other nodes
# http://networkx.lanl.gov/reference/generated/
# networkx.algorithms.centrality.closeness_centrality.html
print()
print("closeness_centrality")
common.clsn = nx.closeness_centrality(common.g)
# print clsn
for i in range(len(common.orderedListOfNodes)):
print(common.orderedListOfNodes[i].number,
common.clsn[common.orderedListOfNodes[i]])
| cc0-1.0 |
guziy/basemap | examples/ortho_demo.py | 2 | 1864 | from __future__ import (absolute_import, division, print_function)
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
def get_input(prompt):
if sys.hexversion > 0x03000000:
return input(prompt)
else:
return raw_input(prompt)
# create Basemap instance for Orthographic (satellite view) projection.
lon_0 = float(get_input('enter reference longitude (lon_0):'))
lat_0 = float(get_input('enter reference latitude (lat_0):'))
# map with land/sea mask plotted
fig = plt.figure()
resolution = 'l'; grid = 5
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0,resolution=resolution)
# land coral, oceans aqua.
# lakes=True means plot inland lakes with ocean color.
# resolution = 5 (default) means use 5 min dataset (can use 2.5)
m.drawcoastlines()
m.drawlsmask(land_color='coral',ocean_color='aqua', lakes=True,\
resolution=resolution,grid=grid)
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary()
plt.title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
# map with continents drawn and filled (continent filling fails for
# lon=-120,lat=60).
fig = plt.figure()
m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0,resolution=resolution)
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
m.drawcountries()
# draw parallels and meridians.
m.drawparallels(np.arange(-90.,120.,30.))
m.drawmeridians(np.arange(0.,420.,60.))
m.drawmapboundary(fill_color='aqua')
# add a map scale.
length = 5000
x1,y1 = 0.3*m.xmax, 0.25*m.ymax
lon1,lat1 = m(x1,y1,inverse=True)
m.drawmapscale(lon1,lat1,lon1,lat1,length,fontsize=8,barstyle='fancy',\
labelstyle='fancy',units='km')
plt.title('Orthographic Map Centered on Lon=%s, Lat=%s' % (lon_0,lat_0))
plt.show()
| gpl-2.0 |
mayblue9/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
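# Expected behaviour sketch (dense, single-output case):
# class_distribution(np.array([[1], [0], [0], [2]]))
# -> classes = [array([0, 1, 2])], n_classes = [3],
#    class_prior = [array([0.5, 0.25, 0.25])]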
| bsd-3-clause |
kkozarev/mwacme | src/catalog/mwacme_search_hek_v2.py | 1 | 3678 | import os
from sunpy.net import hek
from datetime import datetime, timedelta
from local_hek_search import load_cme_events, hek_search
import numpy as np
import matplotlib.pyplot as plt
#DEFINITIONS
#mwacme_list='CME_events.json'
mwacme_list='test.json'
infile=os.environ['HOME']+'/git/mwacme/dat/'+mwacme_list
#LOAD THE EVENTS FROM THE LIST
cmelist=load_cme_events(infile)
#result = client.query(hek.attrs.Time("2014-05-04 20:00:05","2014-05-05 00:00:05") )
all_results=[]
all_events_list=[]
event_times=[]
#------------------Set up the plotting and other constants, etc.------------------------#
#Hardcode the y-limits
plt.axis([-1,len(cmelist),-100,300])
#Plot the zero line
plt.plot(np.arange(-2*len(cmelist),2*len(cmelist)),[0]*4*len(cmelist),c='k',linestyle='--')
alpha=0.3
sym_area=200
#ctable=[0.2,0.4,0.6,0.8,0.99]
ctable=['b','g','k','r','m']
labels=['CMEs (CE)','Eruptions (ER)','Flares (FL)','Filament Eruptions (FE)','Coronal Waves (CW)']
event_types=['CE','ER','FL','FE','CW']
nevtypes=len(event_types)
#Create the template of event dictionary
event_dict_template={}
for evt in event_types:
event_dict_template[evt]=[]
#If greater than zero, print out all the information
verbose=0
#-------------------------------------------------------------#
#For every event in the list, query the HEK
for ii, event in enumerate(cmelist):
event_dst_dict = event_dict_template
event_t0=datetime.strptime(event['C2st'],"%m/%d/%Y %H:%M:%S")
event_times.append(datetime.strptime(event['C2st'],"%m/%d/%Y %H:%M:%S"))
#event=cmelist[0]
print ''
print ''
print datetime.strptime(event['C2st'],"%m/%d/%Y %H:%M:%S"),event['source'],event['flarest'],event['flareclass']
if verbose: print '----------------------------------'
if verbose: print 'Event start time - CoordX - CoordY'
if verbose: print '----------------------------------'
#print datetime.strptime(event['date']+' '+event['flarest'],"%m/%d/%Y %H:%M")
#LOOP OVER THE EVENT TYPES
for evt,event_type in enumerate(event_types):
# Look up the CMEs (CE) around this event time
# event_type = event_types[evt]
if verbose: print labels[evt]
result=hek_search(event,event_type=event_type)
starttimes=[elem["event_starttime"] for elem in result]
coord1=[int(elem["event_coord1"]) for elem in result]
coord2=[int(elem["event_coord2"]) for elem in result]
st_vals,st_inds=np.unique(starttimes,return_index=True)
st_vals=[datetime.strptime(st_val,"%Y-%m-%dT%H:%M:%S") for st_val in st_vals]
cr_vals,cr_inds=np.unique(coord1,return_index=True)
d_st=[]
for st_val in st_vals:
d_st.append((event_t0-st_val).total_seconds()/60.)
event_dst_dict[event_type]=d_st
#Print the x-coordinates of the events and the corresponding start times, based on unique times!!!
if verbose:
for x in range(0,len(st_inds)):
el=st_inds[x]
print starttimes[el],coord1[el],coord2[el],d_st[x]
#Plot the time difference b/n CDAW events and CMEs
N=len(st_vals)
if N > 0:
st_vals[st_vals==0.]=1.
#Different colors for different types of events
colors=[ctable[evt]]
label=labels[evt]
area=[sym_area]*N
x=[ii]*N
if ii == 0:
plt.scatter(x,d_st, c=colors, alpha=alpha, s=sym_area,label=label)
else:
plt.scatter(x,d_st, c=colors, alpha=alpha, s=sym_area)
all_events_list.append(event_dst_dict)
#Show the plot
plt.legend(loc='upper left')
plt.show()
#######PLOTTING
| gpl-2.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/axes_grid/scatter_hist.py | 8 | 1582 | import numpy as np
import matplotlib.pyplot as plt
# the random data
x = np.random.randn(1000)
y = np.random.randn(1000)
fig = plt.figure(1, figsize=(5.5,5.5))
from mpl_toolkits.axes_grid1 import make_axes_locatable
# the scatter plot:
axScatter = plt.subplot(111)
axScatter.scatter(x, y)
axScatter.set_aspect(1.)
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(),
visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max( [np.max(np.fabs(x)), np.max(np.fabs(y))] )
lim = ( int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these
# axis.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
plt.draw()
plt.show()
| gpl-2.0 |
ddempsey/PyFEHM | ftemp.py | 1 | 34138 | """Various templates for use with PyFEHM."""
"""
Copyright 2013.
Los Alamos National Security, LLC.
This material was produced under U.S. Government contract DE-AC52-06NA25396 for
Los Alamos National Laboratory (LANL), which is operated by Los Alamos National
Security, LLC for the U.S. Department of Energy. The U.S. Government has rights
to use, reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR LOS
ALAMOS NATIONAL SECURITY, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES
ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is modified to produce
derivative works, such modified software should be clearly marked, so as not to
confuse it with the version available from LANL.
Additionally, this library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 2.1 of the License, or (at your option)
any later version. Accordingly, this library is distributed in the hope that it
will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
Public License for more details.
"""
import numpy as np
import scipy as sp
import os,math
from fdata import*
from ftool import*
from matplotlib import pyplot as plt
from fvars import*
from fdflt import*
dflt = fdflt()
#-----------------------------------------------------------------------------------------------------
#------------------------------------- FEHM MODEL TEMPLATES ------------------------------------------
#-----------------------------------------------------------------------------------------------------
class wellbore_model(fdata):
'''Create a simple 2D radial well bore model. Returns an fdata object corresponding to the model.
User inputs dimensions of the wellbore, steel pipe, casing and reservoir, their material properties and
permeability, information about initial reservoir conditions and the injection operation.
The model grid is generated on initialisation
but this can be replaced with a more complex mesh by calling ``read_grid()`` - zones will be automatically
reassigned.
The simulation is executed by calling the ``run()`` method.
Output data are visualised by calling ``plot()`` or ``summarise()`` methods.
:param xL: Horizontal dimension of model.
:type xL: fl64
:param zL: Vertical dimension of model.
:type zL: fl64
:param wellbore_radius: Well-bore radius.
:type wellbore_radius: fl64
:param wellbore_xdiv: Horizontal grid divisions in wellbore.
:type wellbore_xdiv: int
:param pipe_width: Steel pipe width.
:type pipe_width: fl64
:param pipe_xdiv: Horizontal grid divisions in steel pipe.
:type pipe_xdiv: int
:param casing_width: Casing width.
:type casing_width: fl64
:param casing_xdiv: Horizontal grid divisions in casing.
:type casing_xdiv: int
:param reservoir_xdiv: Horizontal grid divisions in reservoir.
:type reservoir_xdiv: int
:param zdiv: Number of vertical divisions for the grid (not including the feedzone).
:type zdiv: lst[int]
:param zprop: Proportioning of vertical dimension for gridding.
:type zprop: lst[fl64]
:param injection_temperature: Temperature of fluid injected at the wellhead.
:type injection_temperature: fl64
:param injection_flow_rate: Flow rate of fluid injected at the wellhead.
:type injection_flow_rate: fl64
:param initial_temperature: Specifies initial temperature conditions in the reservoir. If positive, initial temperature is interpreted as isotropic. If negative, initial temperature is interpreted as a vertical gradient. If a string, initial temperature corresponds to a text file containing a temperature-depth profile and applies this as the initial reservoir temperatures.
:type initial_temperature: fl64, str
:param simulation_time: Length of simulation in days.
:type simulation_time: fl64
:param inputfilename: Name of input file.
:type inputfilename: str
:param gridfilename: Name of grid file.
:type gridfilename: str
:param pipe_density: Density of steel pipe.
:type pipe_density: fl64
:param pipe_specific_heat: Specific heat of steel pipe.
:type pipe_specific_heat: fl64
:param casing_density: Density of casing.
:type casing_density: fl64
:param casing_specific_heat: Specific heat of casing.
:type casing_specific_heat: fl64
:param reservoir_density: Density of reservoir.
:type reservoir_density: fl64
:param reservoir_specific_heat: Specific heat of reservoir.
:type reservoir_specific_heat: fl64
:param reservoir_permeability: Reservoir permeability, specified as either isotropic k0 or anisotropic [kx,ky,kz].
:type reservoir_permeability: fl64, lst[fl64]
:param wellbore_permeability: Wellbore permeability, surrogate representing rapid transport down well. Set to high value.
:type wellbore_permeability: fl64, lst[fl64]
'''
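# A minimal construction sketch (values below are illustrative assumptions only,
# not recommended settings). Note the __init__ below passes zdiv straight to
# np.linspace, so a single integer is expected despite the lst[int] type hint:
# dat = wellbore_model(xL=100., zL=1000., wellbore_radius=0.1, wellbore_xdiv=5,
#                      pipe_width=0.01, pipe_xdiv=3, casing_width=0.05, casing_xdiv=3,
#                      reservoir_xdiv=20, zdiv=51, zprop=[1.],
#                      injection_temperature=30., injection_flow_rate=5.,
#                      initial_temperature=150., simulation_time=30.)
# dat.run()   # execute the simulation (inherited from fdata), then dat.plot()/summarise()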
def __init__(self,
xL, zL, wellbore_radius, wellbore_xdiv, pipe_width, pipe_xdiv, casing_width, casing_xdiv,
reservoir_xdiv, zdiv, zprop,
injection_temperature, injection_flow_rate,
initial_temperature,
simulation_time,
inputfilename='', gridfilename='', work_dir = None,
pipe_density = 2500., pipe_specific_heat=1.e3, pipe_conductivity = 2.,
casing_density = 2500., casing_specific_heat=1.e3, casing_conductivity = 0.5,
reservoir_density = 2500., reservoir_specific_heat=1.e3, reservoir_conductivity = 1., reservoir_porosity = 0.1, reservoir_permeability = 1.e-15,
wellbore_permeability=1.e-4,
surface_pressure=0.1, surface_temperature=25.
):
# 1. inherit properties and initialise
super(wellbore_model,self).__init__(filename='',gridfilename='',inconfilename='',sticky_zones=dflt.sticky_zones,associate=dflt.associate,work_dir = None,
full_connectivity=dflt.full_connectivity,skip=[],keep_unknown=dflt.keep_unknown)
self.nobr = True
self._injection_temperature = injection_temperature
self._injection_flow_rate = injection_flow_rate
self._initial_temperature = initial_temperature
self._simulation_time = simulation_time
self._pipe_density = pipe_density
self._casing_density = casing_density
self._reservoir_density = reservoir_density
self._pipe_specific_heat = pipe_specific_heat
self._casing_specific_heat = casing_specific_heat
self._reservoir_specific_heat = reservoir_specific_heat
self._pipe_conductivity = pipe_conductivity
self._casing_conductivity = casing_conductivity
self._reservoir_conductivity = reservoir_conductivity
self._reservoir_porosity = reservoir_porosity
self._reservoir_permeability = reservoir_permeability
self._wellbore_permeability = wellbore_permeability
self._surface_pressure = surface_pressure
self._surface_temperature = surface_temperature
self._remember_inittemp = None
if inputfilename: self._filename = inputfilename
if gridfilename: self.grid._path.filename = gridfilename
self.work_dir = work_dir
# 2. create a grid
if xL == 0 or zL == 0: print('Error: grid dimensions must be non-zero'); return
if wellbore_radius+casing_width+pipe_width>xL: print('Error: No room for reservoir rock.'); return
#if (abs(feedzone_depth)+feedzone_width/2)>abs(zL): print 'Error: model not deep enough to include feedzone.'; return
if gridfilename: meshname = gridfilename
elif inputfilename: meshname = inputfilename.split('.')[0]+'grid'
else: meshname = 'wellbore.grid'
zL = abs(zL); xL = abs(xL)
reservoir_width = xL - wellbore_radius - pipe_width - casing_width
x = (list(np.linspace(0,wellbore_radius,wellbore_xdiv))+
list(np.linspace(wellbore_radius,wellbore_radius+pipe_width,pipe_xdiv))[1:]+
list(np.linspace(wellbore_radius+pipe_width,wellbore_radius+pipe_width+casing_width,casing_xdiv))[1:]+
list(np.linspace(wellbore_radius+pipe_width+casing_width,xL,reservoir_xdiv))[1:])
z = np.linspace(-zL,0,zdiv)
self.grid.make(meshname,x=x,y=z,z=[0.],radial=True)
# 3. create zones
x0,x1 = self.grid.xmin,self.grid.xmax
z0,z1 = self.grid.ymin,self.grid.ymax
wb = fzone(index=1,name='wellbore')
wb.rect([x0-0.01,z0-0.01],[x0-0.01+wellbore_radius,z1+0.01])
self.add(wb)
wb = fzone(index=2,name='wellhead')
wb.rect([x0-0.01,z1-0.01],[x0-0.01+wellbore_radius,z1+0.01])
self.add(wb)
wb = fzone(index=3,name='wellbase')
wb.rect([x0-0.01,z0-0.01],[x0-0.01+wellbore_radius,z0+0.01])
self.add(wb)
pp = fzone(index=10,name='pipe_upper')
pp.rect([x0-0.01+wellbore_radius,z0-0.01],[x0-0.01+wellbore_radius+pipe_width,z1+0.01])
self.add(pp)
cs = fzone(index=20,name='casing_upper')
cs.rect([x0-0.01+wellbore_radius+pipe_width,z0-0.01],
[x0-0.01+wellbore_radius+pipe_width+casing_width,z1+0.01])
self.add(cs)
rk = fzone(index=100,name='reservoir')
rk.rect([x0-0.01+wellbore_radius+pipe_width+casing_width,z0-0.01],[x1+0.01,z1+0.01])
self.add(rk)
sz = fzone(index=900, name = 'surface')
sz.rect([x0-0.01+wellbore_radius,z1-0.01],[x1+0.01,z1+0.01])
self.add(sz)
ff = fzone(index=901, name = 'farfield')
ff.rect([x1-0.01,z0-0.01],[x1+0.01,z1+0.01])
self.add(ff)
# 4. assign material properties
# - conductivity
self.add(fmacro('cond',zone=0,param=(('cond_x',1),('cond_y',1),('cond_z',1))))
self.add(fmacro('cond',zone=10,param=(('cond_x',self.pipe_conductivity),('cond_y',self.pipe_conductivity),('cond_z',self.pipe_conductivity))))
self.add(fmacro('cond',zone=20,param=(('cond_x',self.casing_conductivity),('cond_y',self.casing_conductivity),('cond_z',self.casing_conductivity))))
self.add(fmacro('cond',zone=100,param=(('cond_x',self.reservoir_conductivity),('cond_y',self.reservoir_conductivity),('cond_z',self.reservoir_conductivity))))
# - rock
self.add(fmacro('rock',zone=10,param=(('density',self.pipe_density),('porosity',0),('specific_heat',self.pipe_specific_heat))))
self.add(fmacro('rock',zone=20,param=(('density',self.casing_density),('porosity',0),('specific_heat',self.casing_specific_heat))))
self.add(fmacro('rock',zone=100,param=(('density',self.reservoir_density),('porosity',self.reservoir_porosity),('specific_heat',self.reservoir_specific_heat))))
self.add(fmacro('rock',zone=1,param=(('density',2500.),('porosity',1.),('specific_heat',1.e3))))
# - permeability
self.add(fmacro('perm',zone=0,param=(('kx',1.e-15),('ky',1.e-15),('kz',1.e-15))))
self.add(fmacro('perm',zone=10,param=(('kx',1.e-20),('ky',1.e-20),('kz',1.e-20))))
self.add(fmacro('perm',zone=20,param=(('kx',1.e-20),('ky',1.e-20),('kz',1.e-20))))
reservoirPerm = fmacro('perm',zone=100)
if (isinstance(reservoir_permeability,list) or isinstance(reservoir_permeability,tuple) or
isinstance(reservoir_permeability,np.ndarray)):
reservoirPerm.param['kx']=reservoir_permeability[0]
reservoirPerm.param['ky']=reservoir_permeability[1]
if len(reservoir_permeability) == 2:
reservoirPerm.param['kz']=reservoir_permeability[1]
elif len(reservoir_permeability) == 3:
reservoirPerm.param['kz']=reservoir_permeability[2]
else:
reservoirPerm.param['kx']=reservoir_permeability
reservoirPerm.param['ky']=reservoir_permeability
reservoirPerm.param['kz']=reservoir_permeability
self.add(reservoirPerm)
wellborePerm = fmacro('perm',zone=1)
wellborePerm.param['kx']=wellbore_permeability
wellborePerm.param['ky']=wellbore_permeability
wellborePerm.param['kz']=wellbore_permeability
self.add(wellborePerm)
# 5. assign injection - use boun and distributed flow source
self.add(fboun(zone=[self.zone['wellhead']],type='ti',times=[0,self.simulation_time],
variable=[['dsw',-self.injection_flow_rate,-self.injection_flow_rate],
['t',self.injection_temperature,self.injection_temperature]]))
# 6. assign initial conditions
self.add(fmacro('grad',zone=0,param=(('reference_coord',z1),('direction',2),('variable',1),
('reference_value',0.1),('gradient',-0.00981))))
self._set_reservoir_temperature()
# 7. assign boundary conditions
self.add(fmacro('flow',zone=900,param=(('rate',self.surface_pressure),('energy',-self.surface_temperature),('impedance',100))))
self.add(fmacro('flow',zone=901,param=(('rate',0),('energy',-self.surface_temperature),('impedance',-100))))
self.add(fmacro('flow',zone=3,param=(('rate',0),('energy',-self.surface_temperature),('impedance',-100))))
# 8. assign simulation parameters
self.ctrl['geometry_ICNL'] = 4
self.ctrl['gravity_direction_AGRAV'] = 2
self.ctrl['stor_file_LDA'] = 0
self.ctrl['min_timestep_DAYMIN'] = 1.e-8
self.ctrl['max_timestep_DAYMAX'] = self.simulation_time/10
self.time['initial_timestep_DAY'] = self.simulation_time/1.e2
self.time['max_time_TIMS'] = self.simulation_time
self.time['max_timestep_NSTEP'] = 2000
# 9. assign history and contour output
self.hist.variables.append(['pressure','temperature','flow'])
self.hist.timestep_interval=1
self.hist.format='tec'
self.hist.time_interval=1.e20
self.hist.nodelist = self.zone['wellbore'].nodelist
self.cont.variables.append(['temperature','pressure','xyz','liquid'])
def _set_reservoir_temperature(self):
if isinstance(self._initial_temperature,str):
if not os.path.isfile(self._initial_temperature): print('ERROR: '+self._initial_temperature+' does not exist.'); return
tempfile = open(self._initial_temperature,'r')
lns = tempfile.readlines()
commaFlag = False; spaceFlag = False
if len(lns[0].split(',')) > 1: commaFlag = True
elif len(lns[0].split()) > 1: spaceFlag = True
if not commaFlag and not spaceFlag: print('ERROR: incorrect formatting for '+self._initial_temperature+'. Expect first column depth (m) and second column temperature (degC), either comma or space separated.'); return
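            # Expected file layout (comma- or space-separated), e.g.:
            #   0,25
            #   1000,150
            #   2500,300
            # First column is depth in m (negative elevations are flipped to positive depths
            # below), second column is temperature in degC. Values above are illustrative only.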
zs = []; Ts = []
for ln in lns:
if commaFlag: ln = ln.split(',')
elif spaceFlag: ln = ln.split()
zs.append(float(ln[0])); Ts.append(float(ln[1]))
            allNeg = True # True if no positive z-coords were found (depths given as negative elevations); flipped to positive depths below
for z in zs:
if z>0: allNeg = False; break
if allNeg: zs = [-z for z in zs]
zm = np.unique([nd.position[1] for nd in self.grid.nodelist])
zn_ind = 0
x0,x1 = self.grid.xmin,self.grid.xmax
y0,y1 = self.grid.ymin,self.grid.ymax
for z in zm:
zn=fzone(index=200+zn_ind);zn.rect([x0-0.01,z-0.01],[x1+0.01,z+0.01]);self.add(zn)
T = np.interp(-z,zs,Ts)
self.add(fmacro('pres',zone=200+zn_ind,param=(('pressure',5),('temperature',T),('saturation',1))))
zn_ind+=1
else:
if self._initial_temperature > 0:
temp = fmacro('pres',zone=0,param=(('pressure',1),('temperature',self._initial_temperature),('saturation',1)))
elif self._initial_temperature < 0:
temp = fmacro('grad',zone=0,param=(('reference_coord',self.grid.zmax),('direction',2),('variable',2),
('reference_value',self.surface_temperature),('gradient',self._initial_temperature)))
self.add(temp)
self._remember_inittemp = temp
def _clear_reservoir_temperature(self):
if isinstance(self._remember_inittemp,fmacro):
self.delete(self._remember_inittemp)
self._remember_inittemp = None
def plot(self,temperature_lims = [],pdf = '',combineString = 'gswin64',
Tslice = True, Tslice_xlims=[],Tslice_ylims=[], Tslice_divisions=[100,100], Tslice_method = 'nearest',
Pslice = True, Pslice_xlims=[],Pslice_ylims=[], Pslice_divisions=[100,100], Pslice_method = 'nearest',
Ttime = True, Ttime_xlims=[], Ttime_ylims=[],
Twell = True, Twell_times = [], Twell_xlims=[], Twell_ylims = [], Twell_initial=True, Twell_profiles=None,
Twell_output = False,
Pcorrection = True, Pcorrection_xlims=[], Pcorrection_ylims=[],
imperial_units = False,
write_out = False
):
'''Generate plots of wellbore simulation.
:param temperature_lims: Limits on temperature axis of temperature vs. time plot.
:type temperature_lims: lst[fl64,fl64]
:param pdf: Name of pdf file to combine all output plots. If not specified, pdf will not be created.
:type pdf: str
:param combineString: Name of ghostscript executable.
:type combineString: str
'''
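        # Hedged usage sketch (instance and file names are hypothetical; keyword names come
        # from the signature above):
        #   model.plot(pdf='wellbore_report.pdf', Twell_times=[1., 10., 100.],
        #              imperial_units=False, write_out=True)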
# read in contour data
if self.cont.format == 'surf':
if self.work_dir:
cont = fcontour(self.work_dir+os.sep+self.files.root+'.*_days_sca_node.csv',latest=True)
else:
cont = fcontour(self.files.root+'.*_days_sca_node.csv',latest=True)
# read in history data
if self.hist.format == 'tec':
if self.work_dir:
hist = fhistory(self.work_dir+os.sep+self.files.root+'_*_his.dat')
else:
hist = fhistory(self.files.root+'_*_his.dat')
if pdf:
if self.work_dir: pdf = self.work_dir+os.sep+pdf
ext = 'eps'
mp = multi_pdf(save=pdf)
mp.combineString = combineString
else: ext = 'png'
Tmax = np.max(cont[cont.times[0]]['T'])
# 1. slice plot of temperature
if Tslice:
xlims = [self.grid.xmin,self.grid.xmax]
ylims = [self.grid.ymin,self.grid.ymax]
eqa = True
if Tslice_xlims: xlims = Tslice_xlims; eqa = False
if Tslice_ylims: ylims = Tslice_ylims; eqa = False
scale = 1.
title = 'temperature / $^o$C'
if imperial_units: scale = [9/5.,+32.]; title = 'temperature / $^o$F'
cont.slice_plot(variable = 'T',save=self.files.root+'_temperature.'+ext,cbar = True,
ylabel='z / m',title = title,xlims = xlims,
ylims = ylims,divisions=Tslice_divisions,method=Tslice_method,
equal_axes = eqa,scale = scale)
if pdf: mp.add(self.files.root+'_temperature.eps')
# 2. slice plot of pressure
if Pslice:
xlims = [self.grid.xmin,self.grid.xmax]
ylims = [self.grid.ymin,self.grid.ymax]
eqa = True
if Pslice_xlims: xlims = Pslice_xlims; eqa = False
if Pslice_ylims: ylims = Pslice_ylims; eqa = False
scale = 1.
title = 'pressure / MPa'
if imperial_units: scale = 145.05; title = 'pressure / psi'
cont.slice_plot(variable = 'P',save=self.files.root+'_pressure.'+ext,cbar = True,
ylabel='z / m',title = title,xlims = xlims,
ylims = ylims,divisions=Pslice_divisions,method=Pslice_method,
equal_axes = eqa,scale=scale)
if pdf: mp.add(self.files.root+'_pressure.eps')
# 3. time series plot of bottom hole temperature
if Ttime:
xlims = [hist.times[0],hist.times[-1]]
if Ttime_xlims: xlims = Ttime_xlims
nd = self.grid.node_nearest_point([self.grid.xmin,self.grid.ymin,0])
Tnd = hist['T'][nd.index]
y0,y1 = np.min(Tnd),np.max(Tnd); ymid = (y0+y1)/2; yrange = y1-y0
ylims = [ymid-0.55*yrange,ymid+0.55*yrange]
if Ttime_ylims: ylims = Ttime_ylims
scale = 1.
ylabel = 'temperature / $^o$C'
if imperial_units:
scale = [9/5.,+32.]
ylabel = 'temperature / $^o$F'
ylims = list(scale[0]*np.array(ylims)+scale[1])
hist.time_plot(node = nd.index,save=self.files.root+'_temperatureWB.'+ext,
variable='T',xlabel='t / days', ylabel=ylabel,title='temperature at well bore',
var_lim=ylims,t_lim=xlims,scale = scale)
            if pdf: mp.add(self.files.root+'_temperatureWB.'+ext)
# write out text
if write_out:
if self.work_dir:
of = open(self.work_dir+os.sep+self.files.root+'_DH_temp.dat','w')
else:
of = open(self.files.root+'_DH_temp.dat','w')
t = hist.times
T = hist['T'][nd.index]
for ti,Ti in zip(t,T):
of.write(str(ti)+','+str(Ti)+'\n')
of.close()
# 4. down well plot of temperature, multiple times
if Twell:
xlabel = 'temperature / $^o$C'
if imperial_units:
xlabel = 'temperature / $^o$F'
cols = ['k-','r-','b-','k--','r--','b--','k:','r:','b:']
if len(Twell_times)>9: Twell_times = Twell_times[:9]
# get times
if not Twell_times: ts = [hist.times[-1]]
else:
ts = []
for t in Twell_times:
dt = abs(np.array(hist.times) - t)
ts.append(hist.times[np.where(dt == np.min(dt))[0][0]])
# plot all profiles
plt.figure(figsize=[8,8])
ax = plt.axes([0.15,0.15,0.75,0.75])
ax.set_title('downhole temperature profiles',size='medium')
x0 = self.grid.xmin
allNd = [nd for nd in self.hist.nodelist if nd.position[0] == x0]
allNd.sort(key=lambda x: x.position[1])
for t,col in zip(ts,cols[:len(ts)]):
tind = np.where(hist.times==t)[0][0]
if imperial_units:
T = [hist['T'][nd.index][tind]*9/5.+32. for nd in allNd]
else:
T = [hist['T'][nd.index][tind] for nd in allNd]
z = [nd.position[1] for nd in allNd]
ax.plot(T,z,col)
if Twell_output:
filename = self.files.root+'_T_'+str(hist.times[tind])+'days'
if imperial_units: filename+='_imperial'
filename+='.txt'
lns = []
for zi,Ti in zip(z,T): lns.append(str(zi)+','+str(Ti)+'\n')
file = open(filename,'w'); file.writelines(lns); file.close()
if Twell_initial:
if imperial_units:
T = [hist['T'][nd.index][0]*9/5.+32. for nd in allNd]
else:
T = [hist['T'][nd.index][0] for nd in allNd]
z = [nd.position[1] for nd in allNd]
ax.plot(T,z,'g-')
if Twell_output:
filename = self.files.root+'_T_0days'
if imperial_units: filename+='_imperial'
filename+='.txt'
lns = []
for zi,Ti in zip(z,T): lns.append(str(zi)+','+str(Ti)+'\n')
file = open(filename,'w'); file.writelines(lns); file.close()
if Twell_xlims: ax.set_xlim(Twell_xlims)
if Twell_ylims: ax.set_ylim(Twell_ylims)
ax.set_xlabel(xlabel)
ax.set_ylabel('z / m')
xlims = ax.get_xlim()
ylims = ax.get_ylim()
if Twell_profiles:
if isinstance(Twell_profiles,str): Twell_profiles = [Twell_profiles]
ylims = ax.get_ylim()
for profile,col in zip(Twell_profiles,cols[:len(Twell_profiles)]):
if not os.path.isfile(profile): print('WARNING: '+profile+' not found'); continue
dat = np.loadtxt(profile)
zs = dat[:,0]; Ts = dat[:,1]
                allNeg = True # True if no positive z-coords were found (profile given as negative elevations); flipped to positive depths below
for z in zs:
if z>0: allNeg = False; break
if allNeg: zs = [-z for z in zs]
N = int(abs(zs[-1]-zs[0])/abs(ylims[0]-ylims[1])*24)+3
z = np.linspace(np.min(zs),np.max(zs),N)
Ts = np.interp(z,zs,Ts)
ax.plot(Ts,-1*np.array(z),col+'x')
            # build a manual legend for the plotted times and profiles
text_size = 'small'
xc,yc = 0.7,0.94
ln_len = 0.06
txt_gap = 0.04
dy = 0.04;
xr = xlims[1]-xlims[0]
yr = ylims[1]-ylims[0]
dyi = -dy*(ylims[1]-ylims[0])
x1 = xlims[0] +xc*xr
y1 = ylims[0] +yc*yr
if Twell_initial:
ax.plot([x1,x1+ln_len*xr],[y1,y1],'g-')
ax.text(x1+(ln_len+txt_gap)*xr,y1,'t = 0 days',size=text_size,ha='left',va='center')
y1 = y1 + dyi
cnt = 0
for t,col in zip(ts,cols[:len(ts)]):
ax.plot([x1,x1+ln_len*xr],[y1,y1],col)
if isinstance(t,list): t = t[0]
if t>=1 and t<10: tstr = str(round(t*10)/10)
elif t>=10: tstr = str(int(t))
else: tstr = str(t)
ax.text(x1+(ln_len+txt_gap)*xr,y1,'t = '+tstr+' days',size=text_size,ha='left',va='center')
y1 = y1 + dyi
if Twell_profiles:
if cnt == len(Twell_profiles): continue
ax.plot([x1,x1+ln_len*xr],[y1,y1],col)
ax.plot((2*x1+ln_len*xr)/2,y1,col+'x')
if isinstance(t,list): t = t[0]
if t>=1 and t<10: tstr = str(round(t*10)/10)
elif t>=10: tstr = str(int(t))
else: tstr = str(t)
ax.text(x1+(ln_len+txt_gap)*xr,y1,Twell_profiles[cnt].split('.')[0],size=text_size,ha='left',va='center')
y1 = y1 + dyi
cnt +=1
ax.set_xlim(xlims)
ax.set_ylim(ylims)
plt.savefig(self.files.root+'_Twell.'+ext, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=ext,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf: mp.add(self.files.root+'_Twell.'+ext)
# 5. plot pressure correction at base of hole
if Pcorrection:
x0 = self.grid.xmin
allNd = [nd for nd in self.hist.nodelist if nd.position[0] == x0]
allNd.sort(key=lambda x: x.position[1])
T = [hist['T'][nd.index][0] for nd in allNd] # temperature profile
z = [nd.position[1] for nd in allNd]
P = [abs(nd.position[1])*1e3*9.81/1e6 for nd in allNd]
rho = dens(P,T)[0]
dP0 = abs(np.trapz(rho,z))*9.81/1e6
dP=[]
for t in hist.times:
tind = np.where(hist.times==t)[0][0]
T = [hist['T'][nd.index][tind] for nd in allNd] # temperature profile
rho = dens(P,T)[0]
dP.append(abs(np.trapz(rho,z))*9.81/1e6 - dP0)
plt.figure(figsize=[8,8])
plt.clf()
ax = plt.axes([0.15,0.15,0.75,0.75])
ax.set_title('downhole density pressure correction',size='medium')
scale = 1.
if imperial_units: scale = 145.05
ax.plot(hist.times,np.array(dP)*scale,'bx-')
# write out text
if write_out:
if self.work_dir:
of = open(self.work_dir+os.sep+self.files.root+'_DH_dens_corr.dat','w')
of2 = open(self.work_dir+os.sep+self.files.root+'_DH_pres.dat','w')
else:
of = open(self.files.root+'_DH_dens_corr.dat','w')
of2 = open(self.files.root+'_DH_pres.dat','w')
t = hist.times
for ti,dp in zip(t,dP):
of.write(str(ti)+','+str(dp)+'\n')
of2.write(str(ti)+','+str(dp+dP0)+'\n')
of.close()
of2.close()
ax.set_xlabel('time / days')
if imperial_units:
ax.set_ylabel('pressure correction / psi')
else:
ax.set_ylabel('pressure correction / MPa')
ax.set_xlim([hist.times[0],hist.times[-1]])
if Pcorrection_xlims: ax.set_xlim(Pcorrection_xlims)
if Pcorrection_ylims: ax.set_ylim(Pcorrection_ylims)
plt.savefig(self.files.root+'_Pcorrection.'+ext, dpi=100, facecolor='w', edgecolor='w',orientation='portrait',
format=ext,transparent=True, bbox_inches=None, pad_inches=0.1)
if pdf: mp.add(self.files.root+'_Pcorrection.'+ext)
if pdf: mp.make()
# -------------------------------------- ATTRIBUTES ------------------------------------------------
def _get_pipe_density(self): return self._pipe_density
def _set_pipe_density(self,value):
self._pipe_density = value
self.rock['pipe_upper'].param['density'] = value
pipe_density = property(_get_pipe_density, _set_pipe_density) #: (*fl64*) Density of pipe.
def _get_pipe_specific_heat(self): return self._pipe_specific_heat
def _set_pipe_specific_heat(self,value):
self._pipe_specific_heat = value
self.rock['pipe_upper'].param['specific_heat'] = value
pipe_specific_heat = property(_get_pipe_specific_heat, _set_pipe_specific_heat) #: (*fl64*) Specific heat of pipe.
def _get_casing_density(self): return self._casing_density
def _set_casing_density(self,value):
self._casing_density = value
self.rock['casing_upper'].param['density'] = value
casing_density = property(_get_casing_density, _set_casing_density) #: (*fl64*) Density of casing.
def _get_casing_specific_heat(self): return self._casing_specific_heat
def _set_casing_specific_heat(self,value):
self._casing_specific_heat = value
self.rock['casing_upper'].param['specific_heat'] = value
casing_specific_heat = property(_get_casing_specific_heat, _set_casing_specific_heat) #: (*fl64*) Specific heat of casing
def _get_reservoir_density(self): return self._reservoir_density
def _set_reservoir_density(self,value):
self._reservoir_density = value
self.rock['reservoir'].param['density'] = value
reservoir_density = property(_get_reservoir_density, _set_reservoir_density) #: (*fl64*) Density of reservoir.
def _get_reservoir_specific_heat(self): return self._reservoir_specific_heat
def _set_reservoir_specific_heat(self,value):
self._reservoir_specific_heat = value
self.rock['reservoir'].param['specific_heat'] = value
reservoir_specific_heat = property(_get_reservoir_specific_heat, _set_reservoir_specific_heat) #: (*fl64*) Specific heat of reservoir
def _get_reservoir_porosity(self): return self._reservoir_porosity
def _set_reservoir_porosity(self,value):
self._reservoir_porosity = value
self.rock['reservoir'].param['porosity'] = value
reservoir_porosity = property(_get_reservoir_porosity, _set_reservoir_porosity) #: (*fl64*) Porosity of reservoir.
def _get_reservoir_permeability(self): return self._reservoir_permeability
def _set_reservoir_permeability(self,value):
self._reservoir_permeability = value
if (isinstance(value,list) or isinstance(value,tuple) or
isinstance(value,np.ndarray)):
self.perm['reservoir'].param['kx']=value[0]
self.perm['reservoir'].param['ky']=value[1]
if len(value) == 2:
self.perm['reservoir'].param['kz']=value[1]
elif len(value) == 3:
self.perm['reservoir'].param['kz']=value[2]
else:
self.perm['reservoir'].param['kx']=value
self.perm['reservoir'].param['ky']=value
self.perm['reservoir'].param['kz']=value
reservoir_permeability = property(_get_reservoir_permeability, _set_reservoir_permeability) #: (*fl64*) Permeability of reservoir. If three element list, tuple or ndarray is passed, this is assumed to correspond to [kx,ky,kz].
def _get_wellbore_permeability(self): return self._wellbore_permeability
def _set_wellbore_permeability(self,value):
self._wellbore_permeability = value
self.perm['wellbore'].param['kx'] = value
self.perm['wellbore'].param['ky'] = value
self.perm['wellbore'].param['kz'] = value
wellbore_permeability = property(_get_wellbore_permeability, _set_wellbore_permeability) #: (*fl64*) Permeability in the wellbore. This should be set to a high number, representing free-flowing water.
def _get_pipe_conductivity(self): return self._pipe_conductivity
def _set_pipe_conductivity(self,value):
self._pipe_conductivity = value
self.cond['pipe_upper'].param['cond_x'] = value
self.cond['pipe_upper'].param['cond_y'] = value
self.cond['pipe_upper'].param['cond_z'] = value
pipe_conductivity = property(_get_pipe_conductivity, _set_pipe_conductivity) #: (*fl64*) Thermal conductivity of the steel pipe.
def _get_casing_conductivity(self): return self._casing_conductivity
def _set_casing_conductivity(self,value):
self._casing_conductivity = value
self.cond['casing_upper'].param['cond_x'] = value
self.cond['casing_upper'].param['cond_y'] = value
self.cond['casing_upper'].param['cond_z'] = value
casing_conductivity = property(_get_casing_conductivity, _set_casing_conductivity) #: (*fl64*) Thermal conductivity of the casing.
def _get_reservoir_conductivity(self): return self._reservoir_conductivity
def _set_reservoir_conductivity(self,value):
self._reservoir_conductivity = value
self.cond['reservoir'].param['cond_x'] = value
self.cond['reservoir'].param['cond_y'] = value
self.cond['reservoir'].param['cond_z'] = value
reservoir_conductivity = property(_get_reservoir_conductivity, _set_reservoir_conductivity) #: (*fl64*) Thermal conductivity of the reservoir.
def _get_surface_pressure(self): return self._surface_pressure
def _set_surface_pressure(self,value):
self._surface_pressure = value
self.bounlist[0].variables = [['dsw',self.injection_flow_rate,self.injection_flow_rate],
['t',self.injection_temperature,self.injection_temperature],
['pw',value,value]]
surface_pressure = property(_get_surface_pressure, _set_surface_pressure) #: (**) Pressure at top surface of model, default is atmospheric.
def _get_surface_temperature(self): return self._surface_temperature
def _set_surface_temperature(self,value):
self._surface_temperature = value
self.flow['surface'].param['temperature'] = value
surface_temperature = property(_get_surface_temperature, _set_surface_temperature) #: (**) Temperature at top surface of model, default is 25degC.
def _get_injection_temperature(self): return self._injection_temperature
def _set_injection_temperature(self,value):
self._injection_temperature = value
self.bounlist[0].variables = [['dsw',self.injection_flow_rate,self.injection_flow_rate],['t',value,value],
['pw',self.surface_pressure,self.surface_pressure]]
injection_temperature = property(_get_injection_temperature, _set_injection_temperature) #: (*fl64*) Temperature of the fluid injected at the wellhead.
def _get_injection_flow_rate(self): return self._injection_flow_rate
def _set_injection_flow_rate(self,value):
self._injection_flow_rate = value
self.bounlist[0].variables = [['dsw',value,value],['t',self.injection_temperature,self.injection_temperature],
['pw',self.surface_pressure,self.surface_pressure]]
injection_flow_rate = property(_get_injection_flow_rate, _set_injection_flow_rate) #: (*fl64*) Flow rate of fluid injected at the wellhead.
def _get_simulation_time(self): return self._simulation_time
def _set_simulation_time(self,value):
self._simulation_time = value
self.bounlist[0].times = [0.,value]
self.time['initial_timestep_DAY'] = value/1.e2
self.time['max_time_TIMS'] = value
simulation_time = property(_get_simulation_time, _set_simulation_time) #: (*fl64*) Length of simulation in days
def _get_initial_temperature(self): return self._initial_temperature
def _set_initial_temperature(self,value):
self._initial_temperature = value
self._clear_reservoir_temperature()
self._set_reservoir_temperature()
initial_temperature = property(_get_initial_temperature, _set_initial_temperature) #: (*fl64*, *str*) Specifies initial temperature conditions in the reservoir. If positive, initial temperature is interpreted as isotropic. If negative, initial temperature is interpreted as a vertical gradient. If a string, initial temperature corresponds to a text file containing a temperature-depth profile and applies this as the initial reservoir temperatures.
| lgpl-2.1 |
zhekan/MDrun | src/plotPolinomWindowFit.py | 1 | 1304 | import numpy as np
import matplotlib.pyplot as plt
def plotPolinom2Window(inX,inY,idelta, delta, dmin, dfit, style):
""" plotPolinomWindow -- build fit
inX np.array() for x label
inY np.array() for y label
idelta width min-max index window
delta width min-max value window
dmin minimum value
dfit width min-max fit window
style style line plot
idelta = 30
delta = 150
dmin = 200
dfit = 50
"""
i = 0
inew = 0
#for inY[iend] in inY:
while i+idelta < len(inY):
iend = i+idelta
d = (max(inY[i:iend])-min(inY[i:iend]))
m = (max(inY[i:iend]))
localmin = min(inY[i:iend])
im = max(enumerate(inY[i:iend]),key=lambda x: x[1])[0] + i
if d > delta and m > dmin or localmin < 0:
imax = im
#print ('%s, %s' %(imax, inY[imax]))
x = inX[inew:imax]
y = inY[inew:imax]
p = np.polyfit(x,y,2)
f = np.polyval(p,x)
xd = imax - inew
if max(f) > dmin and xd > dfit:
if p[0] < 0 and xd > 2*dfit and p[1] > 0 and p[2] < 0 and f[-1] >= max(f):
plt.plot(x,f,style, label=' %.2f'%(max(f)))
elif p[0] > 0:
p = np.polyfit(x,y,1)
f = np.polyval(p,x)
if p[0] > 0:
plt.plot(x,f,style, label=' %.2f'%(max(f)))
inew = imax + 50
        i = im+idelta
| agpl-3.0 |
MohammedWasim/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
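# For reference, sklearn/__init__.py consumes this flag with the usual pattern
# (a sketch of the idea, not the exact upstream file contents):
#     try:
#         __SKLEARN_SETUP__
#     except NameError:
#         __SKLEARN_SETUP__ = False
#     if __SKLEARN_SETUP__:
#         sys.stderr.write('Partial import of sklearn during the build process.\n')
#     else:
#         ...  # import the compiled extension modules as usual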
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
jreback/pandas | pandas/tests/arrays/floating/test_arithmetic.py | 3 | 5867 | import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
# Basic test for the arithmetic array ops
# -----------------------------------------------------------------------------
@pytest.mark.parametrize(
"opname, exp",
[
("add", [1.1, 2.2, None, None, 5.5]),
("mul", [0.1, 0.4, None, None, 2.5]),
("sub", [0.9, 1.8, None, None, 4.5]),
("truediv", [10.0, 10.0, None, None, 10.0]),
("floordiv", [9.0, 9.0, None, None, 10.0]),
("mod", [0.1, 0.2, None, None, 0.0]),
],
ids=["add", "mul", "sub", "div", "floordiv", "mod"],
)
def test_array_op(dtype, opname, exp):
a = pd.array([1.0, 2.0, None, 4.0, 5.0], dtype=dtype)
b = pd.array([0.1, 0.2, 0.3, None, 0.5], dtype=dtype)
op = getattr(operator, opname)
result = op(a, b)
expected = pd.array(exp, dtype=dtype)
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(dtype, zero, negative):
# TODO pending NA/NaN discussion
# https://github.com/pandas-dev/pandas/issues/32265/
a = pd.array([0, 1, -1, None], dtype=dtype)
result = a / zero
expected = FloatingArray(
np.array([np.nan, np.inf, -np.inf, np.nan], dtype=dtype.numpy_dtype),
np.array([False, False, False, True]),
)
if negative:
expected *= -1
tm.assert_extension_array_equal(result, expected)
def test_pow_scalar(dtype):
a = pd.array([-1, 0, 1, None, 2], dtype=dtype)
result = a ** 0
expected = pd.array([1, 1, 1, 1, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = a ** 1
expected = pd.array([-1, 0, 1, None, 2], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = a ** pd.NA
expected = pd.array([None, None, 1, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = a ** np.nan
# TODO np.nan should be converted to pd.NA / missing before operation?
expected = FloatingArray(
np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype=dtype.numpy_dtype),
mask=a._mask,
)
tm.assert_extension_array_equal(result, expected)
# reversed
a = a[1:] # Can't raise integers to negative powers.
result = 0 ** a
expected = pd.array([1, 0, None, 0], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = 1 ** a
expected = pd.array([1, 1, 1, 1], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = pd.NA ** a
expected = pd.array([1, None, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
result = np.nan ** a
expected = FloatingArray(
np.array([1, np.nan, np.nan, np.nan], dtype=dtype.numpy_dtype), mask=a._mask
)
tm.assert_extension_array_equal(result, expected)
def test_pow_array(dtype):
a = pd.array([0, 0, 0, 1, 1, 1, None, None, None], dtype=dtype)
b = pd.array([0, 1, None, 0, 1, None, 0, 1, None], dtype=dtype)
result = a ** b
expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None], dtype=dtype)
tm.assert_extension_array_equal(result, expected)
def test_rpow_one_to_na():
# https://github.com/pandas-dev/pandas/issues/22022
# https://github.com/pandas-dev/pandas/issues/29997
arr = pd.array([np.nan, np.nan], dtype="Float64")
result = np.array([1.0, 2.0]) ** arr
expected = pd.array([1.0, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(other):
arr = pd.array([1, None, 2], dtype="Float64")
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
# Test generic characteristics / errors
# -----------------------------------------------------------------------------
def test_error_invalid_values(data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?FloatingArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# Various
# -----------------------------------------------------------------------------
def test_cross_type_arithmetic():
df = pd.DataFrame(
{
"A": pd.array([1, 2, np.nan], dtype="Float64"),
"B": pd.array([1, np.nan, 3], dtype="Float32"),
"C": np.array([1, 2, 3], dtype="float64"),
}
)
result = df.A + df.C
expected = pd.Series([2, 4, np.nan], dtype="Float64")
tm.assert_series_equal(result, expected)
result = (df.A + df.C) * 3 == 12
expected = pd.Series([False, True, None], dtype="boolean")
tm.assert_series_equal(result, expected)
result = df.A + df.B
expected = pd.Series([2, np.nan, np.nan], dtype="Float64")
tm.assert_series_equal(result, expected)
| bsd-3-clause |
vortex-ape/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 7 | 18302 | """Testing for the boost module (sklearn.ensemble.boost)."""
import pytest
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22
@pytest.mark.filterwarnings('ignore: You should specify a value') # 0.22
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
@pytest.mark.filterwarnings('ignore:The default value of n_estimators')
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(gamma="scale"), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(gamma='scale'), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(gamma="scale"), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(gamma='scale', probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(gamma='scale', probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_almost_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_almost_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(gamma='scale'),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(gamma='scale'),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_almost_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_almost_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
jreback/pandas | asv_bench/benchmarks/tslibs/tslib.py | 3 | 1562 | """
ipython analogue:
tr = TimeIntsToPydatetime()
mi = pd.MultiIndex.from_product(
tr.params[:-1] + ([str(x) for x in tr.params[-1]],)
)
df = pd.DataFrame(np.nan, index=mi, columns=["mean", "stdev"])
for box in tr.params[0]:
for size in tr.params[1]:
for tz in tr.params[2]:
tr.setup(box, size, tz)
key = (box, size, str(tz))
print(key)
val = %timeit -o tr.time_ints_to_pydatetime(box, size, tz)
df.loc[key] = (val.average, val.stdev)
"""
from datetime import timedelta, timezone
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytz
try:
from pandas._libs.tslibs import ints_to_pydatetime
except ImportError:
from pandas._libs.tslib import ints_to_pydatetime
_tzs = [
None,
timezone.utc,
timezone(timedelta(minutes=60)),
pytz.timezone("US/Pacific"),
gettz("Asia/Tokyo"),
tzlocal(),
]
_sizes = [0, 1, 100, 10 ** 4, 10 ** 6]
class TimeIntsToPydatetime:
params = (
["time", "date", "datetime", "timestamp"],
_sizes,
_tzs,
)
param_names = ["box", "size", "tz"]
# TODO: fold? freq?
def setup(self, box, size, tz):
arr = np.random.randint(0, 10, size=size, dtype="i8")
self.i8data = arr
def time_ints_to_pydatetime(self, box, size, tz):
if box == "date":
# ints_to_pydatetime does not allow non-None tz with date;
# this will mean doing some duplicate benchmarks
tz = None
ints_to_pydatetime(self.i8data, tz, box=box)
| bsd-3-clause |
jairideout/q2d2 | setup.py | 1 | 2407 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import re
import ast
from setuptools import find_packages, setup
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('q2d2/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
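# The regex captures the right-hand side of a line such as `__version__ = '0.0.1'`
# (version string hypothetical); ast.literal_eval then turns the quoted literal into a str.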
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Prototype/experiments for microbiome analyses.')
with open('README.md') as f:
long_description = f.read()
setup(name='q2d2',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="[email protected]",
maintainer="scikit-bio development team",
maintainer_email="[email protected]",
url='http://caporasolab.us',
test_suite='nose.collector',
packages=find_packages(),
scripts=['scripts/q2d2'],
package_data={'q2d2': ['q2d2/markdown/*md']},
setup_requires=['numpy >= 1.9.2'],
install_requires=[
'scikit-bio >= 0.4.0',
'IPython >= 3.2.0',
'ipymd',
'click',
'marisa-trie',
'seaborn'
],
extras_require={'test': ["nose", "pep8", "flake8",
"python-dateutil"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
)
| bsd-3-clause |
adelomana/viridis | transcriptomeAnalysis/old/marginTracker.py | 1 | 3994 | ###
### This script tracks the margin of gene expression for each gene along different conditions.
###
import sys,numpy
import scipy,scipy.stats,scipy.interpolate
import matplotlib,matplotlib.pyplot
import library
matplotlib.rcParams.update({'font.size':18,'font.family':'Arial','xtick.labelsize':14,'ytick.labelsize':14})
matplotlib.rcParams['pdf.fonttype']=42
def histogrammer(theData):
'''
This function creates a histogram.
'''
n,bins=numpy.histogram(theData,bins=int(numpy.sqrt(len(theData))))
x=[]
halfBin=(bins[1]-bins[0])/2.
for bin in bins:
center=bin+halfBin
x.append(center)
x.pop()
y=numpy.array(n)
y=list(y/float(sum(y)))
return x,y
###
### MAIN
###
print('\nwelcome to marginTracker...\n')
# 0. user defined variables
expressionFile='/Volumes/omics4tb/alomana/projects/dtp/data/expression/tippingPoints/cufflinks/allSamples/genes.fpkm_table.v2.txt'
metaDataFile='/Volumes/omics4tb/alomana/projects/dtp/data/expression/tippingPoints/metadata/metadata.v2.tsv'
# 1. read data
print('reading data...')
# 1.1. reading metadata
metadata=library.metadataReader(metaDataFile)
# 1.2. reading expression
expression=library.expressionReader(expressionFile)
sortedGeneNames=list(expression.keys())
sortedGeneNames.sort()
# 2. analysis
print('performing analysis...')
margins={}
co2levels=[300,1000]
epochs=[0,1,2]
growths=['exp','sta']
diurnals=['AM','PM']
for co2level in co2levels:
margins[co2level]=[]
for epoch in epochs:
localMargins=[]
for growth in growths:
for diurnal in diurnals:
print(co2level,epoch,growth,diurnal)
# obtain the replicate sample IDs
sampleIDs=[]
for sampleID in metadata.keys():
if metadata[sampleID]['co2'] == co2level and metadata[sampleID]['epoch'] == epoch and metadata[sampleID]['growth'] == growth and metadata[sampleID]['diurnal'] == diurnal:
sampleIDs.append(sampleID)
# fill the margin array
for geneName in sortedGeneNames:
m=[]
for sampleID in sampleIDs:
value=expression[geneName][sampleID]
logValue=numpy.log10(value+1)
m.append(logValue)
if m != []:
margin=max(m)-min(m)
# removing low expressed transcripts
if min(m) >= 1:
localMargins.append(margin)
if len(localMargins) != 0:
print(len(localMargins))
# remove 2.5% at each side
localMargins.sort()
extreme=int(len(localMargins)*0.025)-1
trimmedLocalMargins=localMargins[extreme:-extreme]
# compute PDF
x,y=histogrammer(trimmedLocalMargins)
margins[co2level].append([x,y])
# 3. plotting figures
for i in range(len(margins[co2level])):
curve=margins[co2level][i]
matplotlib.pyplot.plot(curve[0],curve[1],'.',color=matplotlib.cm.tab10(i),alpha=0.5,mew=0)
# fit a log-normal distribution
#newF=scipy.interpolate.interp1d(curve[0],curve[1])
newF=scipy.interpolate.splrep(curve[0],curve[1], s=4.5e-5)
# compute the new fitted trajectory
x=numpy.linspace(min(curve[0]),max(curve[0]),100)
newFit = scipy.interpolate.splev(x, newF, der=0)
# plot
matplotlib.pyplot.plot(x,newFit,'-',lw=2,color=matplotlib.cm.tab10(i),label='Stage {}'.format(i+1))
# close figure
matplotlib.pyplot.xlabel('Expression margin (log$_{10}$ FPKM)')
matplotlib.pyplot.ylabel('Probability')
figureName='figure.{}.pdf'.format(co2level)
matplotlib.pyplot.legend(markerscale=1.5,framealpha=1,loc=1,fontsize=12)
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(figureName)
matplotlib.pyplot.clf()
| gpl-2.0 |
zhangfang615/Tuberculosis | simulation/cluster.py | 1 | 7268 | from __future__ import division
from random import shuffle
import random
import pandas
def get_cluster_truth(clusters_truth_file):
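    # Parse the ground-truth file: each line lists the patients of one cluster,
    # separated by spaces. Returns a dict mapping patient ID -> cluster index.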
cluster_truth = {}
line = clusters_truth_file.readline().strip()
i = 0
while line:
patients = line.split(" ")
for patient in patients:
cluster_truth[patient]=i
i += 1
line = clusters_truth_file.readline().strip()
return cluster_truth
def get_cluster_count(cluster_set,cluster_truth):
cluster_counts = []
for cluster in cluster_set:
cluster_count = {}
        for patient in cluster:
            true_cluster = cluster_truth[patient]
            if true_cluster in cluster_count.keys():
                cluster_count[true_cluster] += 1
            else:
                cluster_count[true_cluster] = 1
cluster_counts.append(cluster_count)
return cluster_counts
def combination(number):
if number > 1:
return number*(number-1)/2
else:
return 0
def calculate_TPTN(cluster_numbers):
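    # Pair-counting evaluation of the clustering: per predicted cluster, TP counts
    # pairs of patients that also share a ground-truth cluster, FP_TP counts all
    # pairs inside the predicted cluster (TP + FP), and TN counts pairs split across
    # predicted clusters with different ground-truth labels (each such pair is
    # counted twice here and halved by the caller).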
TP = 0
TN = 0
FP = 0
total = 0
for i in range(len(cluster_numbers)):
total_local = 0
for cluster in cluster_numbers[i].keys():
total += cluster_numbers[i][cluster]
total_local += cluster_numbers[i][cluster]
TP += combination(cluster_numbers[i][cluster])
tn = 0
for j in range(len(cluster_numbers)):
if i != j:
for c in cluster_numbers[j].keys():
if c != cluster:
tn += cluster_numbers[j][c]
TN += cluster_numbers[i][cluster]*tn
FP += combination(total_local)
return TP,TN,FP,total
def check_convergence(resi_index,patient_cluster,patient,global_patient_set,resi_distance_matrix):
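    # Prune the cluster: any member (other than the seed patient) whose nearest
    # neighbour in the distance matrix lies outside the cluster is removed from
    # both the cluster and the global set of assigned patients.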
removed = []
for patient_i in patient_cluster:
if patient_i != patient:
i = resi_index.index(patient_i)
            minimum = min(resi_distance_matrix[i])
            smallest = resi_distance_matrix[i].index(minimum)
s = resi_index[smallest]
if s not in patient_cluster:
removed.append(patient_i)
for patient in removed:
patient_cluster.remove(patient)
global_patient_set.remove(patient)
# print str(patient) + " removed"
def load_diatance_matrix(distance_file_route):
resi_distance_matrix_file = file(distance_file_route)
line = resi_distance_matrix_file.readline().strip()
resi_index = line.split("\t")[1:]
size = len(resi_index)
resi_distance_matrix = []
line = resi_distance_matrix_file.readline().strip()
while line:
distance = []
distances = line.split("\t")[1:]
for dis in distances:
distance.append(int(dis))
resi_distance_matrix.append(distance)
line = resi_distance_matrix_file.readline().strip()
resi_distance_matrix_file.close()
return resi_distance_matrix,size, resi_index
def clustering(resi_distance_matrix, size, resi_index,distance_threshold,P,R,F,cluster_truth,clusters_result_file):
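    # Greedy threshold clustering: visit patients in a random order, seed a new
    # cluster with each still-unassigned patient, pull in every unassigned patient
    # closer than distance_threshold, prune it with check_convergence, then score
    # the partition against the ground truth (pairwise precision, recall and F1).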
seed = random.randint(1,200)
    random.seed(seed)
cluster_set = []
global_patient_set = set()
order = [i for i in range(size)]
shuffle(order)
for i in order:
patient = resi_index[i]
cluster = set()
if patient not in global_patient_set:
cluster.add(patient)
global_patient_set.add(patient)
for j in range(0, size):
distance = int(resi_distance_matrix[i][j])
if distance < distance_threshold and resi_index[j] not in global_patient_set:
cluster.add(resi_index[j])
global_patient_set.add(resi_index[j])
if len(cluster) > 1:
check_convergence(resi_index, cluster, patient, global_patient_set, resi_distance_matrix)
if len(cluster) > 0:
cluster_set.append(cluster)
cluster_numbers = get_cluster_count(cluster_set, cluster_truth)
for cluster in cluster_set:
line = ""
for patient in cluster:
line += str(patient) + " "
line += "\n"
clusters_result_file.writelines(line)
# cluster_numbers = []
# cluster1 = {}
# cluster1[1] = 5
# cluster1[2] = 1
# cluster_numbers.append(cluster1)
#
# cluster1 = {}
# cluster1[1] = 1
# cluster1[2] = 4
# cluster1[3] = 1
# cluster_numbers.append(cluster1)
#
# cluster1 = {}
# cluster1[1] = 2
# cluster1[3] = 3
# cluster_numbers.append(cluster1)
    TP, TN, FP_TP, total = calculate_TPTN(cluster_numbers)
TN /= 2
FN = combination(total) - FP_TP - TN
# print "TP:" + str(TP)
# print "TN:" + str(TN)
# print "FP_TP:" + str(FP_TP)
# print "FN:" + str(FN)
# print total
# RI = (TP+TN)/combination(total)
p = TP / FP_TP
r = TP / (TP + FN)
f = 2 * p * r / (p + r)
# print RI
P.append(p)
R.append(r)
F.append(f)
for isresi in ["resi","unresi"]:
resi_distance_matrix, size, resi_index = load_diatance_matrix("./output_new/distance_"+isresi+".txt")
clusters_result_statistics_file = file("./output_new/cluster_"+isresi+"_statistcs.txt", "w")
clusters_truth_file = file("./output_new/0.26_100_0.01_0.018_0.05_0.75_"+isresi+".txt")
cluster_truth = get_cluster_truth(clusters_truth_file)
clusters_result_file = file("./output_new/cluster_"+isresi+".txt", "w")
if isresi == "resi":
distance_threshold = [10, 30, 50, 70]
else:
distance_threshold = [30, 60, 90, 120]
for threshold in distance_threshold:
P = []
R = []
F = []
for i in range(50):
clustering(resi_distance_matrix, size, resi_index, threshold, P, R, F,cluster_truth,clusters_result_file)
clusters_result_statistics_file.writelines("Distance_Threshlod:" + str(threshold) + "\n\n")
line = " ".join(str(e) for e in P)
clusters_result_statistics_file.writelines("precision: " + line + "\n")
P_pd = pandas.Series(P)
clusters_result_statistics_file.writelines("precision mean: " + str(P_pd.mean()) + "\n")
clusters_result_statistics_file.writelines("precision std: " + str(P_pd.std()) + "\n\n")
line = " ".join(str(e) for e in R)
clusters_result_statistics_file.writelines("recall: " + line + "\n")
R_pd = pandas.Series(R)
clusters_result_statistics_file.writelines("recall mean: " + str(R_pd.mean()) + "\n")
clusters_result_statistics_file.writelines("recall std: " + str(R_pd.std()) + "\n\n")
line = " ".join(str(e) for e in F)
clusters_result_statistics_file.writelines("F1: " + line + "\n")
F_pd = pandas.Series(F)
clusters_result_statistics_file.writelines("F1 mean: " + str(F_pd.mean()) + "\n")
clusters_result_statistics_file.writelines("F1 std: " + str(F_pd.std()) + "\n\n")
clusters_truth_file.close()
clusters_result_file.close()
clusters_result_statistics_file.close() | apache-2.0 |
Extintor/piva | practica5/p5script1.py | 1 | 1486 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 8 12:54:17 2016
@author: paul
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
def cercle(s,radi):
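    # Boolean mask of shape s that is True inside a circle of the given radius
    # centred on the array.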
y,x = np.ogrid[-s[0]/2: s[0]/2, -s[1]/2: s[1]/2]
mascara = (x**2 + y**2 < radi**2)
return mascara
def create_paraboloid(s):
y,x = np.ogrid[-s[0]/2: s[0]/2, -s[1]/2: s[1]/2]
paraboloide = x**2 + y**2
return paraboloide
def turn_to_grayscale(img):
try:
red = img[:,:,0]
green = img[:,:,1]
blue = img[:,:,2]
img = np.uint8(np.add(np.add(red*0.299,green*0.587),blue*0.114))
except:
pass
return img
if __name__ == "__main__":
plt.close('all')
img = turn_to_grayscale(plt.imread('../Imatges/camera.png'))
fftimg = np.fft.fft2(img)
fftimg = np.fft.fftshift(fftimg)
parab = create_paraboloid(img.shape)
plt.figure()
plt.imshow(parab)
fftimg = parab*fftimg
fftimg = np.fft.ifftshift(fftimg)
img2 = np.absolute(np.fft.ifft2(fftimg))
plt.figure()
plt.imshow(img2, cmap="gray")
k = np.array([[0,-1,0],[-1,4,-1],[0,-1,0]])
img3 = ndimage.filters.convolve(img, k, mode='wrap')
plt.figure()
plt.imshow(img3,cmap="gray")
    # Compute the transform of the kernel (padded to the image size)
kfft = np.fft.fft2(k,img.shape)
kfft = np.fft.fftshift(kfft)
plt.figure()
plt.imshow(np.absolute(kfft)) | gpl-3.0 |
jreback/pandas | pandas/tests/indexes/multi/test_names.py | 1 | 4966 | import pytest
import pandas as pd
from pandas import MultiIndex
import pandas._testing as tm
def check_level_names(index, names):
assert [level.name for level in index.levels] == list(names)
def test_slice_keep_name():
x = MultiIndex.from_tuples([("a", "b"), (1, 2), ("c", "d")], names=["x", "y"])
assert x[1:].names == x.names
def test_index_name_retained():
# GH9857
result = pd.DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]})
result = result.set_index("z")
result.loc[10] = [9, 10]
df_expected = pd.DataFrame(
{"x": [1, 2, 6, 9], "y": [2, 2, 8, 10], "z": [-5, 0, 5, 10]}
)
df_expected = df_expected.set_index("z")
tm.assert_frame_equal(result, df_expected)
def test_changing_names(idx):
assert [level.name for level in idx.levels] == ["first", "second"]
view = idx.view()
copy = idx.copy()
shallow_copy = idx._shallow_copy()
# changing names should not change level names on object
new_names = [name + "a" for name in idx.names]
idx.names = new_names
check_level_names(idx, ["firsta", "seconda"])
# and not on copies
check_level_names(view, ["first", "second"])
check_level_names(copy, ["first", "second"])
check_level_names(shallow_copy, ["first", "second"])
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
check_level_names(idx, ["firsta", "seconda"])
def test_take_preserve_name(idx):
taken = idx.take([3, 0, 1])
assert taken.names == idx.names
def test_copy_names():
# Check that adding a "names" parameter to the copy is honored
# GH14302
with tm.assert_produces_warning(FutureWarning):
# subclass-specific kwargs to pd.Index
multi_idx = pd.Index([(1, 2), (3, 4)], names=["MyName1", "MyName2"])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ["MyName1", "MyName2"]
assert multi_idx1.names == ["MyName1", "MyName2"]
multi_idx2 = multi_idx.copy(names=["NewName1", "NewName2"])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ["MyName1", "MyName2"]
assert multi_idx2.names == ["NewName1", "NewName2"]
multi_idx3 = multi_idx.copy(name=["NewName1", "NewName2"])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ["MyName1", "MyName2"]
assert multi_idx3.names == ["NewName1", "NewName2"]
# gh-35592
with pytest.raises(ValueError, match="Length of new names must be 2, got 1"):
multi_idx.copy(names=["mario"])
with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"):
multi_idx.copy(names=[["mario"], ["luigi"]])
def test_names(idx, index_names):
# names are assigned in setup
assert index_names == ["first", "second"]
level_names = [level.name for level in idx.levels]
assert level_names == index_names
# setting bad names on existing
index = idx
with pytest.raises(ValueError, match="^Length of names"):
setattr(index, "names", list(index.names) + ["third"])
with pytest.raises(ValueError, match="^Length of names"):
setattr(index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = idx.levels
major_codes, minor_codes = idx.codes
with pytest.raises(ValueError, match="^Length of names"):
MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=["first"],
)
with pytest.raises(ValueError, match="^Length of names"):
MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=["first", "second", "third"],
)
# names are assigned on index, but not transferred to the levels
index.names = ["a", "b"]
level_names = [level.name for level in index.levels]
assert level_names == ["a", "b"]
def test_duplicate_level_names_access_raises(idx):
# GH19029
idx.names = ["foo", "foo"]
with pytest.raises(ValueError, match="name foo occurs multiple times"):
idx._get_level_number("foo")
def test_get_names_from_levels():
idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"])
assert idx.levels[0].name == "a"
assert idx.levels[1].name == "b"
def test_setting_names_from_levels_raises():
idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"])
with pytest.raises(RuntimeError, match="set_names"):
idx.levels[0].name = "foo"
with pytest.raises(RuntimeError, match="set_names"):
idx.levels[1].name = "foo"
new = pd.Series(1, index=idx.levels[0])
with pytest.raises(RuntimeError, match="set_names"):
new.index.name = "bar"
assert pd.Index._no_setting_name is False
assert pd.Int64Index._no_setting_name is False
assert pd.RangeIndex._no_setting_name is False
| bsd-3-clause |
roxyboy/bokeh | bokeh/charts/builder/tests/test_area_builder.py | 33 | 3666 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pandas as pd
from bokeh.charts import Area
from bokeh.models import DataRange1d, Range1d
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestAreaBuilder(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict(
python=[2, 3, 7, 5, 26],
pypy=[12, 33, 47, 15, 126],
jython=[22, 43, 10, 25, 26],
)
# prepare some data to check tests results...
zeros = np.zeros(5)
x = np.array([4,3,2,1,0,0,1,2,3,4])
y_jython = np.hstack((zeros, np.array(xyvalues['jython'])))
y_pypy = np.hstack((zeros, np.array(xyvalues['pypy'])))
y_python = np.hstack((zeros, np.array(xyvalues['python'])))
data_keys = ['x', 'y_jython', 'y_pypy', 'y_python']
for _xy in [xyvalues, dict(xyvalues), pd.DataFrame(xyvalues)]:
area = create_chart(Area, _xy)
builder = area._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
self.assertListEqual(sorted(builder._data.keys()), data_keys)
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['y_python'], y_python)
self.assertIsInstance(area.x_range, DataRange1d)
self.assertIsInstance(area.y_range, Range1d)
assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)
assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)
self.assertEqual(builder._source._data, builder._data)
data_keys = ['x', 'y_0', 'y_1', 'y_2']
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
y_0, y_1, y_2 = y_python, y_pypy, y_jython
for _xy in [lvalues, np.array(lvalues)]:
area = create_chart(Area, _xy)
builder = area._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
self.assertListEqual(sorted(builder._data.keys()), data_keys)
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_0'], y_0)
assert_array_equal(builder._data['y_1'], y_1)
assert_array_equal(builder._data['y_2'], y_2)
self.assertIsInstance(area.x_range, DataRange1d)
self.assertIsInstance(area.y_range, Range1d)
assert_array_almost_equal(area.y_range.start, -12.6, decimal=4)
assert_array_almost_equal(area.y_range.end, 138.6, decimal=4)
self.assertEqual(builder._source._data, builder._data)
| bsd-3-clause |
jasonyaw/SFrame | oss_src/unity/python/sframe/test/test_dataframe.py | 9 | 1692 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
import unittest
import pandas
import array
from ..cython.cy_dataframe import _dataframe
from pandas.util.testing import assert_frame_equal
class DataFrameTest(unittest.TestCase):
def test_empty(self):
expected = pandas.DataFrame()
assert_frame_equal(_dataframe(expected), expected)
expected['int'] = []
expected['float'] = []
expected['str'] = []
assert_frame_equal(_dataframe(expected), expected)
def test_simple_dataframe(self):
expected = pandas.DataFrame()
expected['int'] = [i for i in range(10)]
expected['float'] = [float(i) for i in range(10)]
expected['str'] = [str(i) for i in range(10)]
expected['unicode'] = [unicode(i) for i in range(10)]
expected['array'] = [array.array('d', [i]) for i in range(10)]
expected['ls'] = [[str(i)] for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
def test_sparse_dataframe(self):
expected = pandas.DataFrame()
expected['sparse_int'] = [i if i % 2 == 0 else None for i in range(10)]
expected['sparse_float'] = [float(i) if i % 2 == 1 else None for i in range(10)]
expected['sparse_str'] = [str(i) if i % 3 == 0 else None for i in range(10)]
expected['sparse_array'] = [array.array('d', [i]) if i % 5 == 0 else None for i in range(10)]
expected['sparse_list'] = [[str(i)] if i % 7 == 0 else None for i in range(10)]
assert_frame_equal(_dataframe(expected), expected)
| bsd-3-clause |
Tjorriemorrie/trading | 07_reinforcement/signals/features.py | 2 | 12660 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
import sklearn as sk
import operator
from pprint import pprint
class FeatureFactory():
def ema(self, s, n):
""" returns an n period exponential moving average for the time series s
s is a list ordered from oldest (index 0) to most recent (index -1)
n is an integer
returns a numeric array of the exponential moving average """
s = np.array(s).astype(float)
ema = []
j = 1
# get n sma first and calculate the next n period ema
sma = sum(s[:n]) / n
multiplier = 2 / float(1 + n)
ema[:0] = [sma] * n
# EMA(current) = ( (Price(current) - EMA(prev) ) x Multiplier) + EMA(prev)
ema.append(( (s[n] - sma) * multiplier) + sma)
# now calculate the rest of the values
for i in s[n + 1:]:
tmp = ( (i - ema[j]) * multiplier) + ema[j]
ema.append(tmp)
j = j + 1
# print "ema length = " + str(len(ema))
return ema
def rsi(self, closes, n):
"""
RSI = 100 - 100/(1 + RS*)
*Where RS = Average of x days' up closes / Average of x days' down closes.
"""
# print '\ncloses'
# print len(closes)
delta = np.diff(closes)
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = pd.rolling_mean(dUp, n)
RolDown = np.absolute(pd.rolling_mean(dDown, n))
RS = RolUp / RolDown
RS[0:n-1] = 0
RS = np.insert(RS, 0, 0)
# print '\nRS'
# print len(RS)
# print RS[0:20]
rsiCalc = lambda x: 100 - 100 / (1 + x)
rsi = [rsiCalc(rs) for rs in RS]
# print '\nrsi'
# print len(rsi)
# print np.array(rsi).astype(int)
return rsi
def extractChiMoku(self, highs, lows, closes):
tenkanSen = []
kijunSen = []
senkouSpanB = []
for i in xrange(len(highs)):
# avg of highest high and lowest low over past 9 ticks
tenkanSenHigh = max(highs[max(0, i-9):i+1])
tenkanSenLow = min(lows[max(0, i-9):i+1])
tenkanSen.append((tenkanSenHigh + tenkanSenLow) / 2)
# avg of highest high and lowest low over past 26 ticks
kijunSenHigh = max(highs[max(0, i-26):i+1])
kijunSenLow = min(lows[max(0, i-26):i+1])
kijunSen.append((kijunSenHigh + kijunSenLow) / 2)
# (Highest high + Lowest low) / 2 over the last 52 trading days plotted 26 days ahead.
senkouSpanBHigh = max(highs[max(0, i-52):i+1])
senkouSpanBLow = min(lows[max(0, i-52):i+1])
senkouSpanB.append((senkouSpanBHigh + senkouSpanBLow) / 2)
# (Tenkan Sen + Kijun Sen) / 2 plotted 26 days ahead.
        senkouSpanA = [(tenkanSen[0] + kijunSen[0]) / 2] * 26
senkouSpanA.extend([(t + k) / 2 for t, k in zip(tenkanSen, kijunSen)])
senkouSpanA = senkouSpanA[:len(highs)]
# The closing price plotted 26 trading days behind.
chikouSpan = [closes[0]] * 26
chikouSpan.extend(closes)
chikouSpan = chikouSpan[:len(highs)]
# pprint(tenkanSen[-5:])
# pprint(kijunSen[-5:])
# pprint(senkouSpanA)
return tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
def getNames(self):
names = [
'close/ema89', 'close/ema55', 'close/ema34', 'close/ema21', 'close/ema13', 'close/ema08',
'ema08/ema89', 'ema08/ema55', 'ema08/ema34', 'ema08/ema21', 'ema08/ema13',
'ema13/ema89', 'ema13/ema55', 'ema13/ema34', 'ema13/ema21',
'ema21/ema89', 'ema21/ema55', 'ema21/ema34',
'ema34/ema89', 'ema34/ema55',
'ema55/ema89',
# 'volume/ema20v', 'volume/ema8v', 'volume/ema5v',
# 'ema5v/ema20v', 'ema5v/ema8v',
# 'ema8v/ema20v',
'topShadow/topShadowsMean',
'botShadow/botShadowsMean',
# RSI
'close > rsi21', 'close > rsi34', 'close > rsi55', 'rsi21 > rsi34', 'rsi21 > rsi55', 'rsi34 > rsi55',
'close < rsi21', 'close < rsi34', 'close < rsi55', 'rsi21 < rsi34', 'rsi21 < rsi55', 'rsi34 < rsi55',
# chimoku
'tenkanKijunBullishWeak', 'tenkanKijunBullishNeutral', 'tenkanKijunBullishStrong',
'tenkanKijunBearishWeak', 'tenkanKijunBearishNeutral', 'tenkanKijunBearishStrong',
'kijunPriceBullishWeak', 'kijunPriceBullishNeutral', 'kijunPriceBullishStrong',
'kijunPriceBearishWeak', 'kijunPriceBearishNeutral', 'kijunPriceBearishStrong',
'kumoBullish', 'kumoBearish',
'senkouSpanBullishWeak', 'senkouSpanBullishNeutral', 'senkouSpanBullishStrong',
'senkouSpanBearishWeak', 'senkouSpanBearishNeutral', 'senkouSpanBearishStrong',
]
return names
def getFeatures(self, opens, highs, lows, closes, volumes):
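        # Build one feature vector per bar: EMA ratios (price vs EMAs and EMA vs EMA),
        # candle shadow sizes relative to their means, binary RSI comparisons and
        # binary Ichimoku signals (tenkan/kijun crosses, kijun/price crosses,
        # kumo breakouts and senkou span relations).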
ema08s = self.ema(closes, 8)
ema13s = self.ema(closes, 13)
ema21s = self.ema(closes, 21)
ema34s = self.ema(closes, 34)
ema55s = self.ema(closes, 55)
ema89s = self.ema(closes, 89)
# ema5vs = self.ema(volumes, 5)
# ema8vs = self.ema(volumes, 8)
# ema20vs = self.ema(volumes, 20)
topShadows = [high - max(open, close) for open, high, close in zip(opens, highs, closes)]
topShadowsMean = np.mean(topShadows)
botShadows = [min(open, close) - low for open, low, close in zip(opens, lows, closes)]
botShadowsMean = np.mean(botShadows)
rsi21s = self.rsi(closes, 21)
rsi34s = self.rsi(closes, 34)
rsi55s = self.rsi(closes, 55)
tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan = self.extractChiMoku(highs, lows, closes)
data = [
[
# EMA
close / ema89, close / ema55, close / ema34, close / ema21, close / ema13, close / ema08,
ema08 / ema89, ema08 / ema55, ema08 / ema34, ema08 / ema21, ema08 / ema13,
ema13 / ema89, ema13 / ema55, ema13 / ema34, ema13 / ema21,
ema21 / ema89, ema21 / ema55, ema21 / ema34,
ema34 / ema89, ema34 / ema55,
ema55 / ema89,
# volume / ema20v, volume / ema8v, volume / ema5v,
# ema5v / ema20v, ema5v / ema8v,
# ema8v / ema20v,
topShadow / topShadowsMean,
botShadow / botShadowsMean,
# RSI
# bullish
1 if close > rsi21 else 0,
1 if close > rsi34 else 0,
1 if close > rsi55 else 0,
1 if rsi21 > rsi34 else 0,
1 if rsi21 > rsi55 else 0,
1 if rsi34 > rsi55 else 0,
# bearish
1 if close < rsi21 else 0,
1 if close < rsi34 else 0,
1 if close < rsi55 else 0,
1 if rsi21 < rsi34 else 0,
1 if rsi21 < rsi55 else 0,
1 if rsi34 < rsi55 else 0,
# TENKAN & KIJUN
# weak bullish
1 if tenkanSen > kijunSen and kijunSen < senkouSpanA else 0,
# neutral bullish
1 if tenkanSen > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
# strong bullish
1 if tenkanSen > kijunSen and kijunSen > senkouSpanA else 0,
# weak bearish
1 if tenkanSen < kijunSen and kijunSen > senkouSpanA else 0,
# neutral bearish
1 if tenkanSen < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
# strong bearish
1 if tenkanSen < kijunSen and kijunSen < senkouSpanA else 0,
# KIJUN & PRICE
# weak bullish
1 if close > kijunSen and kijunSen < senkouSpanA else 0,
# neutral bullish
1 if close > kijunSen and senkouSpanA > kijunSen > senkouSpanB else 0,
# strong bullish
1 if close > kijunSen and kijunSen > senkouSpanA else 0,
# weak bearish
1 if close < kijunSen and kijunSen > senkouSpanA else 0,
# neutral bearish
1 if close < kijunSen and senkouSpanA < kijunSen < senkouSpanB else 0,
# strong bearish
1 if close < kijunSen and kijunSen < senkouSpanA else 0,
# KUMO BREAKOUT
# bullish
1 if close > senkouSpanA else 0,
# bearish
1 if close < senkouSpanA else 0,
# SENKOU SPAN
# weak bullish
1 if senkouSpanA > senkouSpanB and close < senkouSpanA else 0,
# neutral bullish
1 if senkouSpanA > senkouSpanB and senkouSpanA > close > senkouSpanB else 0,
# strong bullish
1 if senkouSpanA > senkouSpanB and close > senkouSpanA else 0,
# weak bearish
1 if senkouSpanA < senkouSpanB and close > senkouSpanA else 0,
# neutral bearish
1 if senkouSpanA < senkouSpanB and senkouSpanA < close < senkouSpanB else 0,
# strong bearish
1 if senkouSpanA < senkouSpanB and close < senkouSpanA else 0,
]
for close,
ema08, ema13, ema21, ema34, ema55, ema89,
# volume, ema5v, ema8v, ema20v,
topShadow, botShadow,
rsi21, rsi34, rsi55,
tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
in zip(closes,
ema08s, ema13s, ema21s, ema34s, ema55s, ema89s,
# volumes, ema5vs, ema8vs, ema20vs,
topShadows, botShadows,
rsi21s, rsi34s, rsi55s,
tenkanSen, kijunSen, senkouSpanA, senkouSpanB, chikouSpan
)
]
# print data
return data
def getRewards(self, closes):
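        # Label each bar 'long' or 'short' according to whether the next iMax closes
        # sit, in aggregate, above or below the current close.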
results = []
iMax = 5 * 4
for pos, close in enumerate(closes):
tmp = [0]
for i in xrange(1, iMax):
index = pos + i
if index >= len(closes) - 2:
break
tmp.append(closes[index] - close)
# label = 'long' if sum(tmp) >= 0 else 'short'
results.append(sum(tmp))
# pprint(results)
# mean = np.mean([abs(r) for r in results])
# mean /= 2
# print 'mean', round(mean, 4)
# rewards = ['long' if abs(r) > mean and r > 0 else 'short' if abs(r) > mean and r < 0 else 'none' for r in results]
rewards = ['long' if r > 0 else 'short' for r in results]
# pprint(rewards)
return rewards
def getRewardsCycle(self, closes):
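        # For each bar, compare the best achievable bull move within the look-ahead
        # window (highest high, penalised by the drawdown before it) with the best
        # bear move, and return the winning side's profit (negative for bear).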
rewards = []
iMax = 5 * 4
for pos, close in enumerate(closes):
# get score for bull
bullHighestHigh = close
bullHighestHighIndex = 0
for i in range(iMax):
if pos + i >= len(closes):
break
closeI = closes[pos + i]
if closeI > bullHighestHigh:
bullHighestHigh = closeI
bullHighestHighIndex = i
bullLowestLow = close
for i in range(bullHighestHighIndex):
closeI = closes[pos + i]
bullLowestLow = min([bullLowestLow, closeI])
bullProfit = bullHighestHigh - close
bullProfitRel = bullProfit - (close - bullLowestLow)
# get score for bear
bearLowestLow = close
bearLowestLowIndex = 0
for i in range(iMax):
if pos + i >= len(closes):
break
closeI = closes[pos + i]
                if closeI < bearLowestLow:
bearLowestLow = closeI
bearLowestLowIndex = i
bearHighestHigh = close
for i in range(bearLowestLowIndex):
closeI = closes[pos + i]
bearHighestHigh = max([bearHighestHigh, closeI])
bearProfit = close - bearLowestLow
bearProfitRel = bearProfit - (bearHighestHigh - close)
rewards.append(bullProfit if bullProfitRel > bearProfitRel else -bearProfit)
return rewards | mit |
devanshdalal/scikit-learn | examples/exercises/plot_cv_diabetes.py | 27 | 2775 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 3
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/getUniParametersFromCSV.py | 8 | 2210 |
import matplotlib
from pylab import *
import csv
from matplotlib.backends.backend_pdf import PdfPages
import sys
from os import listdir
from os.path import isfile, join
import os
sys.path.append('../')
from util import saveMatrixToCVSDict
def cvsToDict(reader):
d = {}
rows=[]
keys=[]
for row in reader:
rows.append(row)
for item in rows[0]:
d[item]=[]
keys.append(item)
for row in rows[1:]:
i=0
for item in row:
d[keys[i]].append(item)
i=i+1
return d
def plotData(directory, outfile):
onlyfiles = [ f for f in listdir(directory) if isfile(join(directory,f)) and f.endswith(".csv") ]
print onlyfiles
objectives = ['u(Av)','f(Av)','eff(Av)','distance(Av)','ple(Av)','time(Av)']
p_data = [['objectives']]
p_data[0].extend(objectives)
for filename in onlyfiles:
tmp_data=[filename]
reader = csv.reader(open(directory+filename, 'r'))
data = cvsToDict(reader)
best_parameters = []
# print "In file: " + filename
for key in objectives:
best_parameters.append(data[key][-1])
tmp_data.extend(best_parameters)
p_data.append(tmp_data)
p_data = sorted(p_data,key=lambda x: x[0])
# p_data = np.array(p_data)
_dict = {}
keys = []
just_params = []
for line in p_data:
_dict[line[0]] = line[1:]
print _dict[line[0]]
keys.append(line[0])
just_params.append(_dict[line[0]])
just_params = zip(*just_params)
print "just params: " + str(just_params)
# p_data_2 = zip(*p_data)
# for line in p_data_2:
# print line
csvfile = open(outfile, 'w')
saveMatrixToCVSDict(just_params, csvfile, keys)
csvfile.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
print "Usage:"
print "python " + os.path.basename(__file__) + " <directory> <out_file>"
print "Example:"
print "python " + os.path.basename(__file__) + "../../data/optimization/rvo2d/Uni-Variet/ orca_uni.csv"
print ""
sys.exit(0)
dir=sys.argv[1]
plotData(dir, sys.argv[2])
| gpl-3.0 |
edublancas/sklearn-evaluation | src/sklearn_evaluation/plot/validation_curve.py | 2 | 1859 | import numpy as np
import matplotlib.pyplot as plt
def validation_curve(train_scores, test_scores, param_range, param_name=None,
semilogx=False, ax=None):
"""Plot a validation curve
Plot a metric vs hyperpameter values for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
param_range : array-like
Hyperparameter values used to generate the curve
    param_name : str
        Hyperparameter name
    semilogx : bool
        Sets a log scale on the x axis
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/validation_curve.py
"""
if ax is None:
ax = plt.gca()
if semilogx:
ax.set_xscale('log')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.set_title("Validation Curve")
ax.set_ylabel("Score mean")
if param_name:
ax.set_xlabel(param_name)
ax.plot(param_range, train_scores_mean, label="Training score", color="r")
ax.plot(param_range, test_scores_mean, label="Cross-validation score",
color="g")
ax.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
ax.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
ax.legend(loc="best")
ax.margins(0.05)
return ax
| mit |
cowlicks/odo | odo/convert.py | 5 | 7903 | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from datashape.predicates import isscalar
from toolz import concat, partition_all
from collections import Iterator, Iterable
import datashape
from datashape import discover
from .core import NetworkDispatcher, ooc_types
from .chunks import chunks, Chunks
from .numpy_dtype import dshape_to_numpy
from .utils import records_to_tuples
convert = NetworkDispatcher('convert')
@convert.register(np.ndarray, pd.DataFrame, cost=0.2)
def dataframe_to_numpy(df, dshape=None, **kwargs):
dtype = dshape_to_numpy(dshape or discover(df))
x = df.to_records(index=False)
if x.dtype != dtype:
x = x.astype(dtype)
return x
@convert.register(pd.DataFrame, np.ndarray, cost=1.0)
def numpy_to_dataframe(x, dshape, **kwargs):
return pd.DataFrame(x, columns=getattr(dshape.measure, 'names', None))
@convert.register(pd.Series, np.ndarray, cost=1.0)
def numpy_to_series(x, **kwargs):
names = x.dtype.names
if names is not None:
if len(names) > 1:
raise ValueError('passed in an ndarray with more than 1 column')
name, = names
return pd.Series(x[name], name=name)
return pd.Series(x)
@convert.register(pd.Series, pd.DataFrame, cost=0.1)
def DataFrame_to_Series(x, **kwargs):
assert len(x.columns) == 1
return x[x.columns[0]]
@convert.register(pd.DataFrame, pd.Series, cost=0.1)
def series_to_dataframe(x, **kwargs):
return x.to_frame()
@convert.register(np.recarray, np.ndarray, cost=0.0)
def ndarray_to_recarray(x, **kwargs):
return x.view(np.recarray)
@convert.register(np.ndarray, np.recarray, cost=0.0)
def recarray_to_ndarray(x, **kwargs):
return x.view(np.ndarray)
higher_precision_freqs = frozenset(('ns', 'ps', 'fs', 'as'))
@convert.register(np.ndarray, pd.Series, cost=0.1)
def series_to_array(s, dshape=None, **kwargs):
# if we come from a node that can't be discovered we need to discover
# on s
dtype = dshape_to_numpy(datashape.dshape(dshape or discover(s)))
sdtype = s.dtype
values = s.values
# don't lose precision of datetime64 more precise than microseconds
if ((issubclass(sdtype.type, np.datetime64) and
np.datetime_data(sdtype)[0] in higher_precision_freqs) or
s.dtype == dtype):
return values
try:
return values.astype(dtype)
except ValueError: # object series and record dshape, e.g., a frame row
return values
@convert.register(list, np.ndarray, cost=10.0)
def numpy_to_list(x, **kwargs):
dt = None
if x.dtype == 'M8[ns]':
dt = 'M8[us]' # lose precision when going to Python datetime
if x.dtype.fields and any(x.dtype[n] == 'M8[ns]' for n in x.dtype.names):
dt = [(n, 'M8[us]' if x.dtype[n] == 'M8[ns]' else x.dtype[n])
for n in x.dtype.names]
if dt:
return x.astype(dt).tolist()
else:
return x.tolist()
@convert.register(np.ndarray, chunks(np.ndarray), cost=1.0)
def numpy_chunks_to_numpy(c, **kwargs):
return np.concatenate(list(c))
@convert.register(chunks(np.ndarray), np.ndarray, cost=0.5)
def numpy_to_chunks_numpy(x, chunksize=2**20, **kwargs):
return chunks(np.ndarray)(
lambda: (x[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
@convert.register(pd.DataFrame, chunks(pd.DataFrame), cost=1.0)
def chunks_dataframe_to_dataframe(c, **kwargs):
c = list(c)
if not c: # empty case
return pd.DataFrame(columns=kwargs.get('dshape').measure.names)
else:
return pd.concat(c, axis=0, ignore_index=True)
@convert.register(chunks(pd.DataFrame), pd.DataFrame, cost=0.5)
def dataframe_to_chunks_dataframe(x, chunksize=2**20, **kwargs):
return chunks(pd.DataFrame)(
lambda: (x.iloc[i:i+chunksize] for i in range(0, x.shape[0], chunksize)))
def ishashable(x):
try:
hash(x)
return True
except:
return False
@convert.register(set, (list, tuple), cost=5.0)
def iterable_to_set(x, **kwargs):
if x and isinstance(x[0], (tuple, list)) and not ishashable(x):
x = map(tuple, x)
return set(x)
@convert.register(list, (tuple, set), cost=1.0)
def iterable_to_list(x, **kwargs):
return list(x)
@convert.register(tuple, (list, set), cost=1.0)
def iterable_to_tuple(x, **kwargs):
return tuple(x)
def element_of(seq):
"""
>>> element_of([1, 2, 3])
1
>>> element_of([[1, 2], [3, 4]])
1
"""
while isinstance(seq, list) and seq:
seq = seq[0]
return seq
@convert.register(np.ndarray, list, cost=10.0)
def list_to_numpy(seq, dshape=None, **kwargs):
if isinstance(element_of(seq), dict):
seq = list(records_to_tuples(dshape, seq))
if (seq and isinstance(seq[0], Iterable) and not ishashable(seq[0]) and
not isscalar(dshape)):
seq = list(map(tuple, seq))
return np.array(seq, dtype=dshape_to_numpy(dshape))
@convert.register(Iterator, list, cost=0.001)
def list_to_iterator(L, **kwargs):
return iter(L)
@convert.register(list, Iterator, cost=1.0)
def iterator_to_list(seq, **kwargs):
return list(seq)
@convert.register(Iterator, (chunks(pd.DataFrame), chunks(np.ndarray)), cost=10.0)
def numpy_chunks_to_iterator(c, **kwargs):
return concat(convert(Iterator, chunk, **kwargs) for chunk in c)
@convert.register(chunks(np.ndarray), Iterator, cost=10.0)
def iterator_to_numpy_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration: # seq is empty
def _():
yield convert(np.ndarray, [], **kwargs)
else:
x = convert(np.ndarray, first, **kwargs)
def _():
yield x
for i in rest:
yield convert(np.ndarray, i, **kwargs)
return chunks(np.ndarray)(_)
@convert.register(chunks(pd.DataFrame), Iterator, cost=10.0)
def iterator_to_DataFrame_chunks(seq, chunksize=1024, **kwargs):
seq2 = partition_all(chunksize, seq)
try:
first, rest = next(seq2), seq2
except StopIteration:
def _():
yield convert(pd.DataFrame, [], **kwargs)
else:
df = convert(pd.DataFrame, first, **kwargs)
def _():
yield df
for i in rest:
yield convert(pd.DataFrame, i, **kwargs)
return chunks(pd.DataFrame)(_)
@convert.register(tuple, np.record)
def numpy_record_to_tuple(rec, **kwargs):
return rec.tolist()
@convert.register(chunks(np.ndarray), chunks(pd.DataFrame), cost=0.5)
def chunked_pandas_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(pd.DataFrame), chunks(np.ndarray), cost=0.5)
def chunked_numpy_to_chunked_pandas(c, **kwargs):
return chunks(pd.DataFrame)(lambda: (convert(pd.DataFrame, chunk, **kwargs) for chunk in c))
@convert.register(chunks(np.ndarray), chunks(list), cost=10.0)
def chunked_list_to_chunked_numpy(c, **kwargs):
return chunks(np.ndarray)(lambda: (convert(np.ndarray, chunk, **kwargs) for chunk in c))
@convert.register(chunks(list), chunks(np.ndarray), cost=10.0)
def chunked_numpy_to_chunked_list(c, **kwargs):
return chunks(list)(lambda: (convert(list, chunk, **kwargs) for chunk in c))
@convert.register(chunks(Iterator), chunks(list), cost=0.1)
def chunked_list_to_chunked_iterator(c, **kwargs):
return chunks(Iterator)(c.data)
@convert.register(chunks(list), chunks(Iterator), cost=0.1)
def chunked_Iterator_to_chunked_list(c, **kwargs):
return chunks(Iterator)(lambda: (convert(Iterator, chunk, **kwargs) for chunk in c))
@convert.register(Iterator, chunks(Iterator), cost=0.1)
def chunked_iterator_to_iterator(c, **kwargs):
return concat(c)
ooc_types |= set([Iterator, Chunks])
| bsd-3-clause |
WolfBerwouts/opengrid | opengrid/library/houseprint/houseprint.py | 2 | 19273 | __author__ = 'Jan Pecinovsky'
from opengrid.config import Config
config = Config()
import os
import sys
import json
import jsonpickle
import datetime as dt
import pandas as pd
from requests.exceptions import HTTPError
import warnings
from tqdm import tqdm
# compatibility with py3
if sys.version_info.major >= 3:
import pickle
else:
import cPickle as pickle
import tmpo
# compatibility with py3
if sys.version_info.major >= 3:
from .site import Site
from .device import Device, Fluksometer
from .sensor import Sensor, Fluksosensor
else:
from site import Site
from device import Device, Fluksometer
from sensor import Sensor, Fluksosensor
"""
The Houseprint is a Singleton object which contains all metadata for sites, devices and sensors.
It can be pickled, saved and passed around
"""
class Houseprint(object):
def __init__(self,
gjson=None,
spreadsheet="Opengrid houseprint (Responses)",
empty_init=False
):
"""
Parameters
---------
gjson: Path to authentication json
spreadsheet: String, name of the spreadsheet containing the metadata
"""
self.sites = []
self.timestamp = dt.datetime.utcnow() # Add a timestamp upon creation
if not empty_init:
if gjson is None:
gjson = config.get('houseprint', 'json')
self.gjson = gjson
self.spreadsheet = spreadsheet
self._parse_sheet()
def reset(self):
"""
Connect to the Google Spreadsheet again and re-parse the data
"""
self.__init__(gjson=self.gjson, spreadsheet=self.spreadsheet)
if hasattr(self, '_tmpos'):
self._add_sensors_to_tmpos()
def __repr__(self):
return """
Houseprint
Created on {} (UTC)
{} sites
{} devices
{} sensors
""".format(self.timestamp,
len(self.sites),
sum([len(site.devices) for site in self.sites]),
sum([len(site.sensors) for site in self.sites])
)
def _parse_sheet(self):
"""
Connects to Google, fetches the spreadsheet and parses the content
"""
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
print('Opening connection to Houseprint sheet')
# fetch credentials
json_key = json.load(open(self.gjson))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(
json_key['client_email'],
json_key['private_key'].encode('ascii'),
scope
)
# authorize and login
gc = gspread.authorize(credentials)
gc.login()
# open sheets
print("Opening spreadsheets")
sheet = gc.open(self.spreadsheet)
sites_sheet = sheet.worksheet('Accounts')
devices_sheet = sheet.worksheet('Devices')
sensors_sheet = sheet.worksheet('Sensors')
print('Parsing spreadsheets')
# 3 sub-methods that parse the different sheets
self._parse_sites(sites_sheet)
self._parse_devices(devices_sheet)
self._parse_sensors(sensors_sheet)
print('Houseprint parsing complete')
def _parse_sites(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'sites' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about sites
"""
records = sheet.get_all_records()
for r in records:
if r['Key'] == '':
continue
new_site = Site(hp=self,
key=r['Key'],
size=r['House size'],
inhabitants=r['Number of inhabitants'],
postcode=r['postcode'],
construction_year=r['construction year'],
k_level=r['K-level'],
e_level=r['E-level'],
epc_cert=r['EPC certificate'])
self.sites.append(new_site)
print('{} Sites created'.format(len(self.sites)))
def _parse_devices(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'devices' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about devices
"""
records = sheet.get_all_records()
for r in records:
if r['Key'] == '':
continue
# find parent site and check if it exists
site = self.find_site(r['Parent site'])
if site is None:
raise ValueError('Device {} was given an invalid site key {}'.format(r['Key'], r['Parent site']))
# create a new device according to its manufacturer
if r['manufacturer'] == 'Flukso':
new_device = Fluksometer(site=site, key=r['Key'])
else:
raise NotImplementedError('Devices from {} are not supported'.format(r['manufacturer']))
# add new device to parent site
site.devices.append(new_device)
print('{} Devices created'.format(sum([len(site.devices) for site in self.sites])))
def _parse_sensors(self, sheet):
"""
Sub method of _parse_sheet() that parses only the 'sensors' sheet
Parameters
----------
sheet: GSpread worksheet
sheet containing metadata about sensors
"""
records = sheet.get_all_records()
for r in records:
if r['Sensor_id'] == '': continue
# find parent. If a parent device is specified, us that, otherwise use a parent site directly
if r['parent device'] != '':
device = self.find_device(r['parent device'])
if device is None:
raise ValueError(
'Sensor {} was given an invalid device key {}. \
Leave the device field empty if you want to add a sensor without a device'.format(
r['Sensor_id'], r['parent device']))
else:
site = self.find_site(r['parent site'])
if site is None:
raise ValueError(
'Sensor {} was given an invalid site key {}'.format(r['Sensor_id'], r['parent site']))
# create new sensor according to its manufacturer
if r['manufacturer'] == 'Flukso':
new_sensor = Fluksosensor(
device=device,
key=r['Sensor_id'],
token=r['token'],
type=r['sensor type'],
description=r['name by user'],
system=r['system'],
quantity=r['quantity'],
unit=r['unit'],
direction=r['direction'],
tariff=r['tariff'],
cumulative=None # will be determined based on type
)
else:
raise NotImplementedError('Sensors from {} are not supported'.format(r['manufacturer']))
new_sensor.device.sensors.append(new_sensor)
print('{} sensors created'.format(sum([len(site.sensors) for site in self.sites])))
def get_sensors(self, sensortype=None):
"""
Return a list with all sensors
Parameters
----------
sensortype: gas, water, electricity: optional
Returns
-------
list of sensors
"""
res = []
for site in self.sites:
for sensor in site.get_sensors(sensortype=sensortype):
res.append(sensor)
return res
def get_fluksosensors(self, **kwargs):
"""
Same thing as get_sensors, but only for fluksosensors
Parameters
----------
kwargs
Returns
-------
[Fluksosensor]
"""
return [sensor for sensor in self.get_sensors(**kwargs) if isinstance(
sensor, Fluksosensor)]
def get_devices(self):
"""
Return a list with all devices
Returns
-------
list of devices
"""
res = []
for site in self.sites:
for device in site.devices:
res.append(device)
return res
def search_sites(self, **kwargs):
"""
Parameters
----------
kwargs: any keyword argument, like key=mykey
Returns
-------
List of sites satisfying the search criterion or empty list if no
variable found.
"""
result = []
for site in self.sites:
for keyword, value in kwargs.items():
if getattr(site, keyword) == value:
continue
else:
break
else:
result.append(site)
return result
def search_sensors(self, **kwargs):
"""
Parameters
----------
kwargs: any keyword argument, like key=mykey
Returns
-------
List of sensors satisfying the search criterion or empty list if no
variable found.
"""
result = []
for sensor in self.get_sensors():
for keyword, value in kwargs.items():
if value in getattr(sensor, keyword):
continue
else:
break
else:
result.append(sensor)
return result
def find_site(self, key):
"""
Parameters
----------
key: string
Returns
-------
Site
"""
for site in self.sites:
if site.key == key:
return site
return None
def find_device(self, key):
"""
Parameters
----------
key: string
Returns
-------
Device
"""
for device in self.get_devices():
if device.key.lower() == key.lower():
return device
return None
def find_sensor(self, key):
"""
Parameters
----------
key: string
Returns
-------
Sensor
"""
for sensor in self.get_sensors():
if sensor.key.lower() == key.lower():
return sensor
return None
def save(self, filename, pickle_format='jsonpickle'):
"""
Save the houseprint object
Parameters
----------
* filename : str
Filename, if relative path or just filename, it is appended to the
current working directory
pickle_format : str
'jsonpickle' or 'pickle'
pickle may be more robust, but jsonpickle should be compatible
across python versions
"""
# temporarily delete tmpo session
try:
tmpos_tmp = self._tmpos
delattr(self, '_tmpos')
except:
pass
abspath = os.path.join(os.getcwd(), filename)
if pickle_format == 'jsonpickle':
with open(abspath, 'w') as f:
frozen = jsonpickle.encode(self)
f.write(frozen)
elif pickle_format == 'pickle':
with open(abspath, 'wb') as f:
pickle.dump(self, file=f)
else:
raise NotImplementedError("Pickle format '{}' is not supported".format(pickle_format))
print("Saved houseprint to {}".format(abspath))
# restore tmposession if needed
try:
setattr(self, '_tmpos', tmpos_tmp)
except:
pass
def init_tmpo(self, tmpos=None, path_to_tmpo_data=None):
"""
Flukso sensors need a tmpo session to obtain data.
It is overkill to have each flukso sensor make its own session, syncing would
take too long and be overly redundant.
Passing a tmpo session to the get_data function is also bad form because
we might add new types of sensors that don't use tmpo in the future.
This is why the session is initialised here.
A tmpo session as parameter is optional. If passed, no additional sensors are added.
If no session is passed, a new one will be created using the location in the config file.
It will then be populated with the flukso sensors known to the houseprint object
Parameters
----------
tmpos : tmpo session
path_to_tmpo_data : str
"""
if tmpos is not None:
self._tmpos = tmpos
else:
try:
path_to_tmpo_data = config.get('tmpo', 'data')
except:
path_to_tmpo_data = None
self._tmpos = tmpo.Session(path_to_tmpo_data)
self._add_sensors_to_tmpos()
print("Using tmpo database from {}".format(self._tmpos.db))
def _add_sensors_to_tmpos(self):
"""
Add all flukso sensors in the houseprint to the tmpo session
"""
for sensor in self.get_fluksosensors():
self._tmpos.add(sensor.key, sensor.token)
def get_tmpos(self):
"""
Returns
-------
TMPO session
"""
if hasattr(self, '_tmpos'):
return self._tmpos
else:
self.init_tmpo()
return self._tmpos
@property
def tmpos(self):
return self.get_tmpos()
def sync_tmpos(self, http_errors='warn'):
"""
Add all Flukso sensors to the TMPO session and sync
Parameters
----------
http_errors : 'raise' | 'warn' | 'ignore'
default 'warn'
define what should be done with TMPO Http-errors
"""
tmpos = self.get_tmpos()
for sensor in tqdm(self.get_fluksosensors()):
try:
warnings.simplefilter('ignore')
tmpos.sync(sensor.key)
warnings.simplefilter('default')
except HTTPError as e:
warnings.simplefilter('default')
if http_errors == 'ignore':
continue
elif http_errors == 'warn':
warnings.warn(message='Error for SensorID: ' + sensor.key
+ str(e))
else:
print('Error for SensorID: ' + sensor.key)
raise e
def get_data(self, sensors=None, sensortype=None, head=None, tail=None, diff='default', resample='min',
unit='default'):
"""
Return a Pandas Dataframe with joined data for the given sensors
Parameters
----------
sensors : list of Sensor objects
If None, use sensortype to make a selection
sensortype : string (optional)
gas, water, electricity. If None, and Sensors = None,
all available sensors in the houseprint are fetched
head, tail: timestamps,
diff : bool or 'default'
If True, the original data will be differentiated
If 'default', the sensor will decide: if it has the attribute
cumulative==True, the data will be differentiated.
resample : str (default='min')
Sampling rate, if any. Use 'raw' if no resampling.
unit : str , default='default'
String representation of the target unit, eg m**3/h, kW, ...
"""
if sensors is None:
sensors = self.get_sensors(sensortype)
series = [sensor.get_data(head=head, tail=tail, diff=diff, resample=resample, unit=unit) for sensor in sensors]
# workaround for https://github.com/pandas-dev/pandas/issues/12985
series = [s for s in series if not s.empty]
if series:
df = pd.concat(series, axis=1)
else:
df = pd.DataFrame()
# Add unit as string to each series in the df. This is not persistent: the attribute unit will get
# lost when doing operations with df, but at least it can be checked once.
for s in series:
try:
df[s.name].unit = s.unit
except:
pass
return df
def get_data_dynamic(self, sensors=None, sensortype=None, head=None,
tail=None, diff='default', resample='min',
unit='default'):
"""
Yield Pandas Series for the given sensors
Parameters
----------
sensors : list(Sensor), optional
If None, use sensortype to make a selection
sensortype : str, optional
gas, water, electricity. If None, and Sensors = None,
all available sensors in the houseprint are fetched
head : dt.datetime | pd.Timestamp | int, optional
tail : dt.datetime | pd.Timestamp | int, optional
diff : bool | str('default')
If True, the original data will be differentiated
If 'default', the sensor will decide: if it has the attribute
cumulative==True, the data will be differentiated.
resample : str
default='min'
Sampling rate, if any. Use 'raw' if no resampling.
unit : str
default='default'
String representation of the target unit, eg m**3/h, kW, ...
Yields
------
Pandas.Series
"""
if sensors is None:
sensors = self.get_sensors(sensortype)
for sensor in sensors:
ts = sensor.get_data(head=head, tail=tail, diff=diff,
resample=resample, unit=unit)
if ts.empty:
continue
else:
yield ts
def add_site(self, site):
"""
Parameters
----------
site : Site
"""
site.hp = self
self.sites.append(site)
def load_houseprint_from_file(filename, pickle_format='jsonpickle'):
"""
Return a static (=anonymous) houseprint object
Parameters
----------
filename : str
pickle_format : str
'jsonpickle' or 'pickle'
pickle may be more robust, but jsonpickle should be compatible
across python versions
"""
if pickle_format == 'jsonpickle':
with open(filename, 'r') as f:
hp = jsonpickle.decode(f.read())
elif pickle_format == 'pickle':
with open(filename, 'rb') as f:
hp = pickle.load(file=f)
else:
raise NotImplementedError("Pickle format '{}' is not supported".format(pickle_format))
return hp
| apache-2.0 |
tasoc/photometry | notes/paper_plot_backgrounds.py | 1 | 2941 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code to generate plot for Photometry Paper.
.. codeauthor:: Rasmus Handberg <[email protected]>
"""
import numpy as np
import h5py
import sys
import os.path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from photometry.plots import plt, plot_image, matplotlib
from photometry.quality import PixelQualityFlags
from matplotlib.colors import ListedColormap
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.size'] = '14'
matplotlib.rcParams['axes.titlesize'] = '18'
matplotlib.rcParams['axes.labelsize'] = '16'
plt.rc('text', usetex=True)
from rasterize_and_save import rasterize_and_save
if __name__ == '__main__':
# Which timestamp to show:
k = 98
# Load the data from the HDF5 file:
with h5py.File('sector001_camera1_ccd2.hdf5', 'r') as hdf:
dset_name = '%04d' % k
flux0 = np.asarray(hdf['images/' + dset_name])
bkg = np.asarray(hdf['backgrounds/' + dset_name])
img = np.asarray(hdf['pixel_flags/' + dset_name])
#img = np.zeros_like(flux0, dtype='int32')
#img[512:1024,512:1024] = 128
flags = np.zeros_like(img, dtype='uint8')
flags[img & PixelQualityFlags.NotUsedForBackground != 0] = 1
flags[img & PixelQualityFlags.ManualExclude != 0] = 2
flags[img & PixelQualityFlags.BackgroundShenanigans != 0] = 3
vmin = hdf['backgrounds'].attrs.get('movie_vmin')
vmax = hdf['backgrounds'].attrs.get('movie_vmax')
vmin2 = hdf['images'].attrs.get('movie_vmin')
vmax2 = hdf['images'].attrs.get('movie_vmax')
print(vmin, vmax)
print(vmin2, vmax2)
# Colormap for images:
cmap = plt.cm.viridis
# Colormap for Flags:
viridis = plt.get_cmap('Dark2')
newcolors = viridis(np.linspace(0, 1, 4))
newcolors[:1, :] = np.array([1, 1, 1, 1])
cmap_flags = ListedColormap(newcolors)
# Create figures:
fig, ax = plt.subplots(1, 4, figsize=(20, 6.2))
img1 = plot_image(flux0+bkg, ax=ax[0], scale='sqrt', vmin=vmin, vmax=vmax, title='Original Image', xlabel=None, ylabel=None, cmap=cmap, make_cbar=True)
img2 = plot_image(bkg, ax=ax[1], scale='sqrt', vmin=vmin, vmax=vmax, title='Background', xlabel=None, ylabel=None, cmap=cmap, make_cbar=True)
img3 = plot_image(flux0, ax=ax[2], scale='sqrt', vmin=vmin2, vmax=vmax2, title='Background subtracted', xlabel=None, ylabel=None, cmap=cmap, make_cbar=True)
img4 = plot_image(flags, ax=ax[3], scale='linear', vmin=-0.5, vmax=3.5, title='Pixel Flags', xlabel=None, ylabel=None, cmap=cmap_flags, make_cbar=True, clabel='Flags', cbar_ticks=[0,1,2,3], cbar_ticklabels=['None','Not used','Man. Excl.','Shenan'])
# Remove axes ticks:
for a in ax:
a.set_xticks([])
a.set_yticks([])
fig.set_tight_layout('tight')
# Save figure to file:
#rasterize_and_save('sector001_camera1_ccd2.pdf', [img1, img2, img3, img4], fig=fig, dpi=150, bbox_inches='tight')
fig.savefig('sector001_camera1_ccd2.png', bbox_inches='tight', dpi=150)
plt.close(fig)
#plt.show()
| gpl-3.0 |
anorfleet/turntable | test/lib/python2.7/site-packages/scipy/interpolate/tests/test_rbf.py | 22 | 4159 | #!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
"""Check that the Rbf function interpolates through the nodes (1D)"""
olderr = np.seterr(all="ignore")
try:
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
finally:
np.seterr(**olderr)
def check_rbf2d_interpolation(function):
"""Check that the Rbf function interpolates through the nodes (2D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
finally:
np.seterr(**olderr)
def check_rbf3d_interpolation(function):
"""Check that the Rbf function interpolates through the nodes (3D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = random.rand(50,1)*4-2
d = x*exp(-x**2-y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
finally:
np.seterr(**olderr)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
"""Check that the Rbf function approximates a smooth function well away
from the nodes."""
olderr = np.seterr(all="ignore")
try:
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
#plt.title(function)
#plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
finally:
np.seterr(**olderr)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.05,
'inverse multiquadric': 0.02,
'gaussian': 0.01,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def test_default_construction():
"""Check that the Rbf class can be constructed with the default
multiquadric basis function. Regression test for ticket #1228."""
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
"""Check that the Rbf class can be constructed with function=callable."""
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
"""Check that the Rbf class can be constructed with a two argument
function=callable."""
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
| mit |
ratnania/pigasus | doc/manual/include/demo/test_nonlin_ex1_picard.py | 1 | 1914 | #! /usr/bin/python
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from igakit.cad_geometry import circle
from pigasus.gallery.poisson_nonlin import poisson_picard
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
exp = np.exp ; log = np.log ; sqrt = np.sqrt
#-----------------------------------
AllDirichlet = True
try:
nx = int(sys.argv[1])
except:
nx = 31
try:
ny = int(sys.argv[2])
except:
ny = 31
try:
px = int(sys.argv[3])
except:
px = 2
try:
py = int(sys.argv[4])
except:
py = 2
geo = circle (radius = 1. / sqrt (2), n =[nx, ny], p =[px, py])
#-----------------------------------
# ...
u_exact = lambda x,y : [- 2.0 * log ( x**2 + y**2 + 0.5 )]
# ...
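# Editor's note: with this exact solution the model problem is -Laplacian(u) = 4 exp(u)
# on the disc of radius 1/sqrt(2); the Picard iteration below repeatedly solves the linear
# Poisson problem whose right-hand side 4 exp(u_n) is evaluated at the previous iterate.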
PDE = poisson_picard( geometry=geo \
, AllDirichlet=AllDirichlet )
# ...
print ">>> Solving using Picard <<<"
# ...
if PDE.Dirichlet:
U = PDE.unknown_dirichlet
else:
U = PDE.unknown
# ...
from pigasus.fem.utils import function
# ...
def func(U,x,y):
_U = U.evaluate()
return [4. * exp (_U)]
# ...
F = function(func, fields=[U])
list_L2, list_H1 = PDE.solve(F, u0=None, maxiter=50, rtol=1.e-6,
verbose=True)
print "norm using Picard ", PDE.norm(exact=u_exact)
# ...
if PLOT:
fig = plt.figure()
plt.subplot(121, aspect='equal')
U.fast_plot() ; plt.colorbar(orientation='horizontal') ; plt.title('$u_h$')
# plot error evolution
plt.subplot(122)
plt.plot(list_L2, '-vb', label='$L^2$ norm')
plt.plot(list_H1, '-xr', label='$H^1$ norm')
plt.xlabel('N')
plt.semilogy()
plt.title('Norm evolution of $u^{n+1} - u^n$')
plt.legend()
# ...
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| mit |
rseubert/scikit-learn | sklearn/tests/test_lda.py | 11 | 5671 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
"""Test LDA classification.
This checks that LDA implements fit and predict and returns correct values
for simple toy data.
"""
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
"""Test if the coefficients of the solvers are approximately the same.
"""
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
"""Test LDA transform.
"""
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1**2))
d2 /= np.sqrt(np.sum(d2**2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
"""Test if classification works correctly with differently scaled features.
"""
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
| bsd-3-clause |
johannesmik/neurons | docs/conf.py | 2 | 8276 | # -*- coding: utf-8 -*-
#
# Neurons documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 7 20:42:40 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
# Mock imports, so that readthedocs can import them
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'pylab', 'matplotlib.animation']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
else:
html_theme = 'default'
# My options
autoclass_content = 'both'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neurons'
copyright = u'2015, Johannes Mikulasch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from neurons import get_version
version = get_version()
# The full version, including alpha/beta/rc tags.
release = get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Neuronsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Neurons.tex', u'Neurons Documentation',
u'Johannes Mikulasch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'neurons', u'Neurons Documentation',
[u'Johannes Mikulasch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Neurons', u'Neurons Documentation',
u'Johannes Mikulasch', 'Neurons', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
def setup(app):
app.add_javascript('jquery-ui.js')
app.add_javascript('plot-eps.js')
app.add_javascript('plot-eta.js')
| bsd-2-clause |
mne-tools/mne-tools.github.io | 0.21/_downloads/7ba372d4d1fc615cb50ec2550db1f49b/plot_mixed_source_space_connectivity.py | 3 | 7028 | """
===============================================================================
Compute mixed source space connectivity and visualize it using a circular graph
===============================================================================
This example computes the all-to-all connectivity between 75 regions in a
mixed source space based on dSPM inverse solutions and a FreeSurfer cortical
parcellation. The connectivity is visualized using a circular graph which
is ordered based on the locations of the regions in the axial plane.
"""
# Author: Annalisa Pascarella <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import mne
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import setup_volume_source_space, setup_source_space
from mne import make_forward_solution
from mne.io import read_raw_fif
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.connectivity import spectral_connectivity
from mne.viz import circular_layout, plot_connectivity_circle
# Set directories
data_path = sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_raw = data_dir + '/sample_audvis_filt-0-40_raw.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_cov = data_dir + '/ernoise-cov.fif'
fname_event = data_dir + '/sample_audvis_filt-0-40_raw-eve.fif'
# List of sub structures we are interested in. We select only the
# sub structures we want to include in the source space
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
# Setup a surface-based source space, oct5 is not very dense (just used
# to speed up this example; we recommend oct6 in actual analyses)
src = setup_source_space(subject, subjects_dir=subjects_dir,
spacing='oct5', add_dist=False)
# Setup a volume source space
# set pos=10.0 for speed, not very accurate; we recommend something smaller
# like 5.0 in actual analyses:
vol_src = setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
add_interpolator=False, # just for speed, usually use True
volume_label=labels_vol, subjects_dir=subjects_dir)
# Generate the mixed source space
src += vol_src
# Load data
raw = read_raw_fif(fname_raw)
raw.pick_types(meg=True, eeg=False, eog=True, stim=True).load_data()
events = mne.find_events(raw)
noise_cov = mne.read_cov(fname_cov)
# compute the fwd matrix
fwd = make_forward_solution(raw.info, fname_trans, src, fname_bem,
mindist=5.0) # ignore sources<=5mm from innerskull
del src
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
reject=reject, preload=False)
del raw
# Compute inverse solution and for each epoch
snr = 1.0 # use smaller SNR for raw data
inv_method = 'dSPM'
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
lambda2 = 1.0 / snr ** 2
# Compute inverse operator
inverse_operator = make_inverse_operator(
epochs.info, fwd, noise_cov, depth=None, fixed=False)
del fwd
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, inv_method,
pick_ori=None, return_generator=True)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(subject, parc=parc,
subjects_dir=subjects_dir)
# Average the source estimates within each label of the cortical parcellation
# and each sub structures contained in the src space
# If mode = 'mean_flip' this option is used only for the cortical label
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(
stcs, labels_parc, src, mode='mean_flip', allow_empty=True,
return_generator=True)
# We compute the connectivity in the alpha band and plot it using a circular
# graph layout
fmin = 8.
fmax = 13.
sfreq = epochs.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin,
fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1)
# We create a list of Label containing also the sub structures
labels_aseg = mne.get_volume_labels_from_src(src, subject, subjects_dir)
labels = labels_parc + labels_aseg
# read colors
node_colors = [label.color for label in labels]
# We reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
lh_labels = [name for name in label_names if name.endswith('lh')]
rh_labels = [name for name in label_names if name.endswith('rh')]
# Get the y-location of the label
label_ypos_lh = list()
for name in lh_labels:
idx = label_names.index(name)
ypos = np.mean(labels[idx].pos[:, 1])
label_ypos_lh.append(ypos)
try:
idx = label_names.index('Brain-Stem')
except ValueError:
pass
else:
ypos = np.mean(labels[idx].pos[:, 1])
lh_labels.append('Brain-Stem')
label_ypos_lh.append(ypos)
# Reorder the labels based on their location
lh_labels = [label for (yp, label) in sorted(zip(label_ypos_lh, lh_labels))]
# For the right hemi
rh_labels = [label[:-2] + 'rh' for label in lh_labels
if label != 'Brain-Stem' and label[:-2] + 'rh' in rh_labels]
# Save the plot order
node_order = lh_labels[::-1] + rh_labels
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=[0, len(label_names) // 2])
# Plot the graph using node colors from the FreeSurfer parcellation. We only
# show the 300 strongest connections.
conmat = con[:, :, 0]
fig = plt.figure(num=None, figsize=(8, 8), facecolor='black')
plot_connectivity_circle(conmat, label_names, n_lines=300,
node_angles=node_angles, node_colors=node_colors,
title='All-to-All Connectivity left-Auditory '
'Condition (PLI)', fig=fig)
###############################################################################
# Save the figure (optional)
# --------------------------
#
# By default matplotlib does not save using the facecolor, even though this was
# set when the figure was generated. If not set via savefig, the labels, title,
# and legend will be cut off from the output png file.
# fname_fig = data_path + '/MEG/sample/plot_mixed_connect.png'
# plt.savefig(fname_fig, facecolor='black')
| bsd-3-clause |
johanherman/arteria-bcl2fastq | bcl2fastq/lib/illumina.py | 3 | 4258 |
from pandas import read_csv
class SampleRow:
"""
Provides a representation of the information presented in a Illumina Samplesheet.
Different samplesheet types (e.g. HiSeq, MiSeq, etc) will provide slightly different
information for each sample. This class aims at providing a interface to this that will
hopefully stay relatively stable across time.
For an example of how the samplesheet looks see: ./tests/sampledata/new_samplesheet_example.csv
TODO Implement picking up additional information from
samplesheet. Right only picking up the data field is
supported.
"""
def __init__(self, sample_id, sample_name, index1, sample_project, lane=None, sample_plate=None,
sample_well=None, index2=None, description=None):
"""
        Constructs the SampleRow, which mirrors the information on each sequencing unit (lane, sample, tag, etc.)
        in the samplesheet. NB: If a field is set to None, it means that column didn't exist in the samplesheet.
        If it is an empty string it means that it was set to an empty value.
        :param sample_id: unique id of the sample
        :param sample_name: the name of the sample
        :param index1: index to demultiplex by
        :param sample_project: project the sample belongs to
        :param lane: lane the sample was sequenced on - will default to 1 if not set (e.g. the MiSeq
            samplesheet does not contain lane information)
:param sample_plate: plate the sample was taken from
:param sample_well: well on plate
:param index2: second index in the case of dual indexing
:param description: a free text field containing additional info about the sample
:return:
"""
self.lane = int(lane) if lane else 1
self.sample_id = str(sample_id)
self.sample_name = str(sample_name)
self.sample_plate = sample_plate
self.sample_well = sample_well
self.index1 = index1
self.index2 = index2
self.sample_project = str(sample_project)
self.description = description
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
if type(other) == type(self):
return self.__dict__ == other.__dict__
else:
False
class Samplesheet:
"""
    Represents the information contained in an Illumina samplesheet.
"""
def __init__(self, samplesheet_file):
"""
Create a Samplesheet instance.
:param samplesheet_file: a path to the samplesheet file to read
"""
self.samplesheet_file = samplesheet_file
with open(samplesheet_file, mode="r") as s:
self.samples = self._read_samples(s)
@staticmethod
def _read_samples(samplesheet_file_handle):
"""
Read info about the sequencing units in the samplesheet.
:param samplesheet_file_handle: file handle for the corresponding samplesheet
:return: a list of the sequencing units in the samplesheet in the form of `SampleRow` instances.
"""
def find_data_line():
            enumerated_lines = enumerate(samplesheet_file_handle)
            lines_with_data = [line for line in enumerated_lines if "[Data]" in line[1]]
            assert len(lines_with_data) == 1, "Expected exactly one line containing '[Data]' in the samplesheet"
            return lines_with_data[0][0]
def row_to_sample_row(index_and_row):
row = index_and_row[1]
return SampleRow(lane=row.get("Lane"), sample_id=row.get("Sample_ID"), sample_name=row.get("Sample_Name"),
sample_plate=row.get("Sample_Plate"), sample_well=row.get("Sample_Well"),
index1=row.get("index"), index2=row.get("index2"),
sample_project=row.get("Sample_Project"), description=row.get("Description"))
lines_to_skip = find_data_line() + 1
# Ensure that pointer is at beginning of file again.
samplesheet_file_handle.seek(0)
samplesheet_df = read_csv(samplesheet_file_handle, skiprows=lines_to_skip)
samplesheet_df = samplesheet_df.fillna("")
samples = map(row_to_sample_row, samplesheet_df.iterrows())
return list(samples)
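# --- Editor's usage sketch (hedged, not part of the original module) ---
# Minimal illustration of how Samplesheet/SampleRow are meant to be used; the samplesheet
# path below is an assumption based on the example file mentioned in the SampleRow docstring.
if __name__ == "__main__":
    sheet = Samplesheet("tests/sampledata/new_samplesheet_example.csv")
    for sample in sheet.samples:
        # Each entry is a SampleRow; `lane` defaults to 1 when the samplesheet has no Lane column.
        print("lane={0} sample_id={1} index1={2} project={3}".format(
            sample.lane, sample.sample_id, sample.index1, sample.sample_project))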
| mit |
lenovor/scikit-learn | sklearn/ensemble/weight_boosting.py | 97 | 40773 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
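# --- Editor's usage sketch (hedged): a minimal illustration of the two estimators
# described in the module docstring, using the public sklearn API on toy data. It is
# not part of the library itself and only runs when this file is executed as a script.
if __name__ == "__main__":
    from sklearn.datasets import make_classification, make_regression
    from sklearn.ensemble import AdaBoostClassifier as _DemoClassifier
    from sklearn.ensemble import AdaBoostRegressor as _DemoRegressor
    X_c, y_c = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = _DemoClassifier(n_estimators=50, learning_rate=1., random_state=0)
    print("classification accuracy: {0:.3f}".format(clf.fit(X_c, y_c).score(X_c, y_c)))
    X_r, y_r = make_regression(n_samples=200, n_features=10, random_state=0)
    reg = _DemoRegressor(n_estimators=50, loss='linear', random_state=0)
    print("regression R^2: {0:.3f}".format(reg.fit(X_r, y_r).score(X_r, y_r)))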
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
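    # Editor's note: the expression below is the per-estimator SAMME.R contribution
    # h_k(x) = (K - 1) * (log p_k(x) - (1 / K) * sum_l log p_l(x)),
    # i.e. the centered log-probabilities scaled by (n_classes - 1).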
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
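    # Worked example of the update above (illustrative numbers only): with
    # learning_rate=1 and estimator_error=0.2, beta = 0.2/0.8 = 0.25 and
    # estimator_weight = log(1/0.25) ~= 1.386. A sample the estimator predicted
    # perfectly (normalized error 0) has its weight multiplied by beta**1 = 0.25,
    # while the worst-predicted sample (normalized error 1) keeps beta**0 = 1,
    # so subsequent estimators concentrate on the difficult cases.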
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
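    # Weighted-median sketch for a single sample (illustrative numbers only):
    # predictions [1.0, 3.0, 10.0] with estimator weights [0.2, 0.5, 0.3] give a
    # cumulative weight of [0.2, 0.7, 1.0] along the sorted predictions; the
    # first entry reaching half the total weight (0.5) is at index 1, so the
    # weighted median prediction returned for that sample is 3.0.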
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
drewokane/seaborn | examples/structured_heatmap.py | 24 | 1304 | """
Discovering structure in heatmap data
=====================================
_thumb: .4, .2
"""
import pandas as pd
import seaborn as sns
sns.set(font="monospace")
# Load the brain networks example dataset
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Select a subset of the networks
used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Create a custom palette to identify the networks
network_pal = sns.cubehelix_palette(len(used_networks),
light=.9, dark=.1, reverse=True,
start=1, rot=-2)
network_lut = dict(zip(map(str, used_networks), network_pal))
# Convert the palette to vectors that will be drawn on the side of the matrix
networks = df.columns.get_level_values("network")
network_colors = pd.Series(networks).map(network_lut)
# Create a custom colormap for the heatmap values
cmap = sns.diverging_palette(h_neg=210, h_pos=350, s=90, l=30, as_cmap=True)
# Draw the full plot
sns.clustermap(df.corr(), row_colors=network_colors, linewidths=.5,
col_colors=network_colors, figsize=(13, 13), cmap=cmap)
| bsd-3-clause |
enigmampc/catalyst | catalyst/utils/calendars/exchange_calendar_nyse.py | 6 | 5082 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from itertools import chain
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay,
Christmas,
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayBefore1993,
USBlackFridayInOrAfter1993,
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
)
# Useful resources for making changes to this file:
# http://www.nyse.com/pdfs/closings.pdf
# http://www.stevemorse.org/jcal/whendid.html
class NYSEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for NYSE
Open Time: 9:31 AM, US/Eastern
Close Time: 4:00 PM, US/Eastern
Regularly-Observed Holidays:
    - New Years Day (observed on Monday when Jan 1 is a Sunday)
- Martin Luther King Jr. Day (3rd Monday in January, only after 1998)
- Washington's Birthday (aka President's Day, 3rd Monday in February)
- Good Friday (two days before Easter Sunday)
- Memorial Day (last Monday in May)
- Independence Day (observed on the nearest weekday to July 4th)
- Labor Day (first Monday in September)
- Thanksgiving (fourth Thursday in November)
- Christmas (observed on nearest weekday to December 25)
NOTE: The NYSE does not observe the following US Federal Holidays:
- Columbus Day
- Veterans Day
Regularly-Observed Early Closes:
- July 3rd (Mondays, Tuesdays, and Thursdays, 1995 onward)
- July 5th (Fridays, 1995 onward, except 2013)
- Christmas Eve (except on Fridays, when the exchange is closed entirely)
- Day After Thanksgiving (aka Black Friday, observed from 1992 onward)
NOTE: Until 1993, the standard early close time for the NYSE was 2:00 PM.
From 1993 onward, it has been 1:00 PM.
Additional Irregularities:
- Closed from 9/11/2001 to 9/16/2001 due to terrorist attacks in NYC.
- Closed on 10/29/2012 and 10/30/2012 due to Hurricane Sandy.
- Closed on 4/27/1994 due to Richard Nixon's death.
- Closed on 6/11/2004 due to Ronald Reagan's death.
- Closed on 1/2/2007 due to Gerald Ford's death.
- Closed at 1:00 PM on Wednesday, July 3rd, 2013
- Closed at 1:00 PM on Friday, December 31, 1999
- Closed at 1:00 PM on Friday, December 26, 1997
- Closed at 1:00 PM on Friday, December 26, 2003
NOTE: The exchange was **not** closed early on Friday December 26, 2008,
nor was it closed on Friday December 26, 2014. The next Thursday Christmas
will be in 2025. If someone is still maintaining this code in 2025, then
we've done alright...and we should check if it's a half day.
"""
regular_early_close = time(13)
@property
def name(self):
return "NYSE"
@property
def tz(self):
return timezone('US/Eastern')
@property
def open_time(self):
return time(9, 31)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USPresidentsDay,
GoodFriday,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
Christmas,
])
@property
def adhoc_holidays(self):
return list(chain(
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
))
@property
def special_closes(self):
return [
(self.regular_early_close, HolidayCalendar([
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayInOrAfter1993,
ChristmasEveInOrAfter1993
])),
(time(14), HolidayCalendar([
ChristmasEveBefore1993,
USBlackFridayBefore1993,
])),
]
@property
def special_closes_adhoc(self):
return [
(self.regular_early_close, [
'1997-12-26',
'1999-12-31',
'2003-12-26',
'2013-07-03'
])
]
| apache-2.0 |
TuKo/brainiak | tests/fcma/test_mvpa_voxel_selection.py | 5 | 1917 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.mvpa_voxelselector import MVPAVoxelSelector
from brainiak.searchlight.searchlight import Searchlight
from sklearn import svm
import numpy as np
from mpi4py import MPI
from numpy.random import RandomState
# specify the random state to fix the random numbers
prng = RandomState(1234567890)
def test_mvpa_voxel_selection():
data = prng.rand(5, 5, 5, 8).astype(np.float32)
    # all MPI processes construct the same small mask in memory
mask = np.ones([5, 5, 5], dtype=np.bool)
mask[0, 0, :] = False
labels = [0, 1, 0, 1, 0, 1, 0, 1]
# 2 subjects, 4 epochs per subject
sl = Searchlight(sl_rad=1)
mvs = MVPAVoxelSelector(data, mask, labels, 2, sl)
# for cross validation, use SVM with precomputed kernel
clf = svm.SVC(kernel='rbf', C=10)
result_volume, results = mvs.run(clf)
if MPI.COMM_WORLD.Get_rank() == 0:
output = []
for tuple in results:
if tuple[1] > 0:
output.append(int(8*tuple[1]))
expected_output = [6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4,
4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 1]
assert np.allclose(output, expected_output, atol=1), \
'voxel selection via SVM does not provide correct results'
if __name__ == '__main__':
test_mvpa_voxel_selection()
| apache-2.0 |
theodoregoetz/clas12-dc-wiremap | clas12_wiremap/ui/wire_doublet_status.py | 1 | 6279 | import numpy as np
from numpy import random as rand
from clas12_wiremap.ui import QtGui
from matplotlib import pyplot, gridspec, cm
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg \
as FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT \
as NavigationToolbar
from matplotlib.figure import Figure
from component_array import *
class WireMap(QtGui.QWidget):
def __init__(self, parent=None):
super(WireMap,self).__init__(parent)
self.parent = parent
self.setup_widgets()
self.vbox = QtGui.QVBoxLayout(self)
self.vbox.addWidget(self.canvas)
self.vbox.addWidget(self.toolbar)
def setup_widgets(self):
# setting up dimensions
self.fig = Figure((5.0, 4.0), dpi=100)
#attaching the figure to the canvas
self.canvas = FigureCanvas(self.fig)
#attaching a toolbar to the canvas
self.toolbar = NavigationToolbar(self.canvas, self.parent)
self.axs = [[] for i in range(6)]
self.pts = [[None]*6 for i in range(6)]
sector_grid = gridspec.GridSpec(2,3,wspace=0.3,hspace=0.2)
for sec in range(6):
slyr_grid = gridspec.GridSpecFromSubplotSpec(6,1,
wspace=0.0,hspace=0.1,
subplot_spec=sector_grid[sec])
for slyr in range(6):
self.axs[sec].append(
self.fig.add_subplot(slyr_grid[5-slyr]))
def update_plots(self):
for sec in range(6):
for slyr in range(6):
self.pts[sec][slyr] = \
self.superlayer_plot(self.axs[sec][slyr],sec,slyr)
self.canvas.draw()
def superlayer_plot(self,ax,sec,slyr):
if not hasattr(self,'data'):
self.data = fetchCrateArray(session)
pt = ax.imshow(self.data[sec][slyr],
origin='lower',
aspect='auto',
interpolation='nearest',
extent=[0.5,112.5,-0.5,5.5],
vmin=0,
cmap=cm.ocean)
if slyr == 5:
ax.set_title('Sector '+str(sec+1))
if (sec > 2) and (slyr == 0):
ax.xaxis.set_ticks([1]+list(range(32,113,32)))
ax.xaxis.set_ticklabels([1]+list(range(32,113,32)))
else:
ax.xaxis.set_major_locator(pyplot.NullLocator())
ax.set_ylabel(str(slyr+1))
ax.yaxis.set_major_locator(pyplot.NullLocator())
ax.hold(False)
return pt
class WireMapSector(QtGui.QWidget):
def __init__(self, sector, parent=None):
super(WireMapSector,self).__init__(parent)
self.sector = sector
self.parent = parent
self.setup_widgets()
self.vbox = QtGui.QVBoxLayout(self)
self.vbox.addWidget(self.canvas)
self.vbox.addWidget(self.toolbar)
def setup_widgets(self):
self.fig = Figure((5.0, 4.0), dpi=100)
self.canvas = FigureCanvas(self.fig)
self.toolbar = NavigationToolbar(self.canvas, self.parent)
self.axs = []
self.pts = [None]*6
slyr_grid = gridspec.GridSpec(6,1,wspace=0.0,hspace=0.1)
for slyr in range(6):
self.axs.append(
self.fig.add_subplot(slyr_grid[5-slyr]))
def update_plots(self):
for slyr in range(6):
self.pts[slyr] = \
self.superlayer_plot(self.axs[slyr],slyr)
self.canvas.draw()
def superlayer_plot(self,ax,slyr):
if not hasattr(self,'data'):
self.data = fetchCrateArray(session)
pt = ax.imshow(self.data[slyr],
origin='lower',
aspect='auto',
interpolation='nearest',
extent=[0.5,112.5,-0.5,5.5],
vmin=0,
cmap=cm.ocean)
if slyr == 5:
ax.set_title('Sector '+str(self.sector+1))
if slyr == 0:
ax.xaxis.set_ticks([1]+list(range(32,113,32)))
ax.xaxis.set_ticklabels([1]+list(range(32,113,32)))
else:
ax.xaxis.set_major_locator(pyplot.NullLocator())
ax.set_ylabel(str(slyr+1))
ax.yaxis.set_major_locator(pyplot.NullLocator())
ax.hold(False)
return pt
class WireMaps(QtGui.QStackedWidget):
def __init__(self, parent=None):
super(WireMaps,self).__init__(parent)
self.wiremap = WireMap(self)
self.addWidget(self.wiremap)
self.sec_wiremaps = []
for sec in range(6):
self.sec_wiremaps.append(WireMapSector(sec,self))
self.addWidget(self.sec_wiremaps[sec])
self.data = fetchCrateArray(session)
@property
def data(self):
return self.wiremap.data
@data.setter
def data(self,d):
self._data = d
self.wiremap.data = self._data
for sec in range(6):
self.sec_wiremaps[sec].data = self._data[sec]
self.update_active_plots()
def update_active_plots(self):
if super(WireMaps,self).currentIndex() == 0:
self.wiremap.update_plots()
else:
sec = super(WireMaps,self).currentIndex() - 1
self.sec_wiremaps[sec].update_plots()
def setCurrentIndex(self,*args,**kwargs):
super(WireMaps,self).setCurrentIndex(*args,**kwargs)
self.update_active_plots()
if __name__ == '__main__':
import sys
from matplotlib import pyplot
session = initialize_session()
dc_fill_tables(session)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
wid = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
wid.setLayout(vbox)
cbox = QtGui.QSpinBox()
cbox.setMinimum(0)
cbox.setMaximum(6)
cbox.setSpecialValueText('-')
stack = WireMaps()
stack.data = fetchDoubletArray(session)
            # change this line to fetch a different component
vbox.addWidget(cbox)
vbox.addWidget(stack)
self.setCentralWidget(wid)
cbox.valueChanged.connect(stack.setCurrentIndex)
self.show()
app = QtGui.QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
| gpl-3.0 |
mataevs/persondetector | detection/hog_train_set.py | 1 | 3020 |
import cv2
import utils
from skimage.feature import hog
import cPickle
import numpy
from sklearn.externals import joblib
from sklearn import svm
def get_set(metadataFile, classType):
set = []
with open(metadataFile, "r") as f:
entries = f.readlines()
for entry in entries:
entry = entry.split()
filePath = entry[0]
x, y, scale = int(entry[1]), int(entry[2]), float(entry[3])
img = cv2.imread(filePath)
img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray_crop = img_gray[y:y+128, x:x+64]
hog_gray = hog(img_gray_crop, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualise=False)
prevFilePath = utils.get_prev_img(filePath)
prev_img = cv2.imread(prevFilePath)
prev_img = cv2.resize(prev_img, (0, 0), fx=scale, fy=scale)
prev_img_gray = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prev_img_gray, img_gray, 0.5, 3, 15, 3, 5, 1.2, 0)
# flowx, flowy = flow[..., 0], flow[..., 1]
# flowx_crop, flowy_crop = flowx[y:y+128, x:x+64], flowy[y:y+128, x:x+64]
#
# hog_flow_x = hog(flowx_crop, orientations=9, pixels_per_cell=(8, 8),
# cells_per_block=(2, 2), visualise=False)
# hog_flow_y = hog(flowy_crop, orientations=9, pixels_per_cell=(8, 8),
# cells_per_block=(2, 2), visualise=False)
hsv = numpy.zeros_like(img)
hsv[..., 1] = 255
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180/ numpy.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
flowRGB = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
flow_gray = cv2.cvtColor(flowRGB, cv2.COLOR_BGR2GRAY)
flow_gray_crop = flow_gray[y:y+128, x:x+64]
hog_flow = hog(flow_gray_crop, orientations=9, pixels_per_cell=(8, 8),
cells_per_block=(2, 2), visualise=False)
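        # hog() returns flat numpy feature vectors of equal length for the two
        # crops, so the line below is an element-wise sum of the appearance and
        # flow descriptors (consistent with the "sum" naming of the dump files
        # further down), not a concatenation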
desc = hog_gray + hog_flow
set.append(desc)
return set, [classType] * len(entries)
def get_hog_train_set(pos_filepath, neg_filepath, featureFile):
p_features, p_classes = get_set(pos_filepath, 1)
n_features, n_classes = get_set(neg_filepath, -1)
print "positive samples", len(p_features)
print "negative samples", len(n_features)
features = p_features + n_features
classes = p_classes + n_classes
cPickle.dump((features, classes), open(featureFile, "wb"), protocol=2)
def trainSvmClassifier(feature_file, classifierFile):
get_hog_train_set("hog_corpus_pos.txt", "hog_corpus_neg.txt", feature_file)
obj = cPickle.load(open(feature_file, "rb"))
features, classes = obj
svc = svm.SVC(C=1.0, kernel='linear', probability=True).fit(features, classes)
joblib.dump(svc, classifierFile)
trainSvmClassifier("hog_train_test_sum.dump", "hog_sum.dump") | mit |
rexshihaoren/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
huaj1101/ML-PY | ML_IN_ACTION/C6/EXTRAS/notLinSeperable.py | 4 | 2270 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
markers =[]
colors =[]
fr = open('testSet.txt')  # this file was generated by 2normalGen.py
for line in fr.readlines():
lineSplit = line.strip().split('\t')
xPt = float(lineSplit[0])
yPt = float(lineSplit[1])
label = int(lineSplit[2])
if (label == 0):
xcord0.append(xPt)
ycord0.append(yPt)
else:
xcord1.append(xPt)
ycord1.append(yPt)
fr.close()
fig = plt.figure()
ax = fig.add_subplot(221)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if ((x > 0.5) and (y < 0.5)) or ((x < 0.5) and (y > 0.5)):
xcord0.append(x); ycord0.append(y)
else:
xcord1.append(x); ycord1.append(y)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('A')
ax = fig.add_subplot(222)
xcord0 = random.standard_normal(150); ycord0 = random.standard_normal(150)
xcord1 = random.standard_normal(150)+2.0; ycord1 = random.standard_normal(150)+2.0
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('B')
ax = fig.add_subplot(223)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if (x > 0.5):
xcord0.append(x*cos(2.0*pi*y)); ycord0.append(x*sin(2.0*pi*y))
else:
xcord1.append(x*cos(2.0*pi*y)); ycord1.append(x*sin(2.0*pi*y))
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('C')
ax = fig.add_subplot(224)
xcord1 = zeros(150); ycord1 = zeros(150)
xcord0 = random.uniform(-3,3,350); ycord0 = random.uniform(-3,3,350);
xcord1[0:50] = 0.3*random.standard_normal(50)+2.0; ycord1[0:50] = 0.3*random.standard_normal(50)+2.0
xcord1[50:100] = 0.3*random.standard_normal(50)-2.0; ycord1[50:100] = 0.3*random.standard_normal(50)-3.0
xcord1[100:150] = 0.3*random.standard_normal(50)+1.0; ycord1[100:150] = 0.3*random.standard_normal(50)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('D')
plt.show() | apache-2.0 |
mcnowinski/various-and-sundry | lightcurve/moc4.py | 1 | 31624 | import pandas as pd
from collections import defaultdict
import math
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
import os
#SDSS MOC4 data file
path = 'ADR4.dat'
#solar colors (reverse calculated from Carvano)
#reference to g
solar_color_ug = 3.81
solar_color_rg = 2.04
solar_color_ig = 1.94
solar_color_zg = 1.90
solar_color_gg = 2.5 #to make LRgg = 1
#4.27, 2.96, 2.5, 2.4, 2.36
#2.32, 0.46, 0, -0.1, -0.14
#reference to r
solar_color_ur = solar_color_ug - solar_color_rg
solar_color_gr = solar_color_gg - solar_color_rg
solar_color_rr = 0.0
solar_color_ir = solar_color_ig - solar_color_rg
solar_color_zr = solar_color_zg - solar_color_rg
#print solar_color_ur, solar_color_gr, solar_color_rr, solar_color_ir, solar_color_zr
#os.sys.exit(1)
#sdss wavelengths (microns)
#0.354, 0.477, 0.6230, 0.7630 and 0.913 um
u_wavelength=0.3543
g_wavelength=0.4770
r_wavelength=0.6231
i_wavelength=0.7625
z_wavelength=0.9134
#carvano taxonomy limits
#TAX LRug LRgg LRrg LRig LRzg CGguL CGguU CGrgL CGrgU CGirL CGirU CGziL CGziU
# O 0.884 1.000 1.057 1.053 0.861 0.784 1.666 0.175 0.505 -0.143 0.106 -0.833 -0.467
# V 0.810 1.000 1.099 1.140 0.854 1.087 2.095 0.511 2.374 -0.077 0.445 -2.018 -0.683
# Q 0.842 1.000 1.082 1.094 0.989 0.757 2.122 0.421 0.967 -0.032 0.229 -0.719 -0.200
# S 0.839 1.000 1.099 1.148 1.096 0.868 1.960 0.379 0.910 0.148 0.601 -0.530 -0.047
# A 0.736 1.000 1.156 1.209 1.137 1.264 4.210 0.937 1.342 0.151 0.505 -0.521 -0.089
# C 0.907 1.000 1.008 1.011 1.021 0.385 1.990 -0.140 0.403 -0.203 0.202 -0.221 0.259
# X 0.942 1.000 1.029 1.063 1.073 0.178 1.081 -0.089 0.481 0.136 0.478 -0.182 0.187
# L 0.858 1.000 1.071 1.109 1.116 0.913 2.089 0.253 0.871 0.136 0.622 -0.125 0.160
# D 0.942 1.000 1.075 1.135 1.213 0.085 1.717 -0.080 0.589 0.142 0.625 0.121 0.502
LR_means = {}
LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
LR_means['V'] = {'LRug': 0.810, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.140, 'LRzg': 0.854}
LR_means['Q'] = {'LRug': 0.842, 'LRgg': 1.000, 'LRrg': 1.082, 'LRig': 1.094, 'LRzg': 0.989}
LR_means['S'] = {'LRug': 0.839, 'LRgg': 1.000, 'LRrg': 1.099, 'LRig': 1.148, 'LRzg': 1.096}
LR_means['A'] = {'LRug': 0.736, 'LRgg': 1.000, 'LRrg': 1.156, 'LRig': 1.209, 'LRzg': 1.137}
LR_means['C'] = {'LRug': 0.907, 'LRgg': 1.000, 'LRrg': 1.008, 'LRig': 1.011, 'LRzg': 1.021}
LR_means['X'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.029, 'LRig': 1.063, 'LRzg': 1.073}
LR_means['L'] = {'LRug': 0.858, 'LRgg': 1.000, 'LRrg': 1.071, 'LRig': 1.109, 'LRzg': 1.116}
LR_means['D'] = {'LRug': 0.942, 'LRgg': 1.000, 'LRrg': 1.075, 'LRig': 1.135, 'LRzg': 1.213}
#K type calc from Wabash 2453
LR_means['K'] = {'LRug': 0.871, 'LRgg': 1.000, 'LRrg': 1.053, 'LRig': 1.088, 'LRzg': 1.077}
#calc slope and bd (Carvano 2015) for the mean taxonomic shapes (Carvano 2011)
#LR_means['O'] = {'LRug': 0.884, 'LRgg': 1.000, 'LRrg': 1.057, 'LRig': 1.053, 'LRzg': 0.861}
log_mean=open('moc4.mean.txt', 'w')
log_mean.write('%s,%f,%f,%f,%f,%f\n'%('space', u_wavelength, g_wavelength, r_wavelength, i_wavelength, z_wavelength))
log_mean.write('%s,%s,%s,%s,%s,%s,%s,%s\n'%('class', 'Rur', 'Rgr', 'Rrr', 'Rir', 'Rzr', 'slope', 'bd'))
for key in LR_means:
LRug = LR_means[key]['LRug']
LRgg = LR_means[key]['LRgg']
LRrg = LR_means[key]['LRrg']
LRig = LR_means[key]['LRig']
LRzg = LR_means[key]['LRzg']
#
Cug = -2.5*LRug
Cgg = -2.5*LRgg
Crg = -2.5*LRrg
Cig = -2.5*LRig
Czg = -2.5*LRzg
#
Cur = Cug - Crg
Cgr = Cgg - Crg
Crr = 0.0
Cir = Cig - Crg
Czr = Czg - Crg
#
LRur = -Cur/2.5
LRgr = -Cgr/2.5
LRrr = -Crr/2.5
LRir = -Cir/2.5
LRzr = -Czr/2.5
#
Rur = pow(10,LRur)
Rgr = pow(10,LRgr)
Rrr = pow(10,LRrr)
Rir = pow(10,LRir)
Rzr = pow(10,LRzr)
#Carvano 2015 parameters
slope = (Rir-Rgr)/(i_wavelength-g_wavelength)
bd = Rzr - Rir
log_mean.write('%s,%f,%f,%f,%f,%f,%f,%f\n'%(key, Rur, Rgr, Rrr, Rir, Rzr, slope, bd))
log_mean.close()
CG_limits = {}
CG_limits['O'] = {'CGguL': 0.784, 'CGguU': 1.666, 'CGrgL': 0.175, 'CGrgU': 0.505, 'CGirL':-0.143, 'CGirU': 0.106, 'CGziL': -0.833, 'CGziU': -0.467}
CG_limits['V'] = {'CGguL': 1.087, 'CGguU': 2.095, 'CGrgL': 0.511, 'CGrgU': 2.374, 'CGirL':-0.077, 'CGirU': 0.445, 'CGziL': -2.018, 'CGziU': -0.683}
CG_limits['Q'] = {'CGguL': 0.757, 'CGguU': 2.122, 'CGrgL': 0.421, 'CGrgU': 0.967, 'CGirL':-0.032, 'CGirU': 0.229, 'CGziL': -0.719, 'CGziU': -0.200}
CG_limits['S'] = {'CGguL': 0.868, 'CGguU': 1.960, 'CGrgL': 0.379, 'CGrgU': 0.910, 'CGirL': 0.148, 'CGirU': 0.601, 'CGziL': -0.530, 'CGziU': -0.047}
CG_limits['A'] = {'CGguL': 1.264, 'CGguU': 4.210, 'CGrgL': 0.937, 'CGrgU': 1.342, 'CGirL': 0.151, 'CGirU': 0.505, 'CGziL': -0.521, 'CGziU': -0.089}
CG_limits['C'] = {'CGguL': 0.385, 'CGguU': 1.990, 'CGrgL':-0.140, 'CGrgU': 0.403, 'CGirL':-0.203, 'CGirU': 0.202, 'CGziL': -0.221, 'CGziU': 0.259}
CG_limits['X'] = {'CGguL': 0.178, 'CGguU': 1.081, 'CGrgL':-0.089, 'CGrgU': 0.481, 'CGirL': 0.136, 'CGirU': 0.478, 'CGziL': -0.182, 'CGziU': 0.187}
CG_limits['L'] = {'CGguL': 0.913, 'CGguU': 2.089, 'CGrgL': 0.253, 'CGrgU': 0.871, 'CGirL': 0.136, 'CGirU': 0.622, 'CGziL': -0.125, 'CGziU': 0.160}
CG_limits['D'] = {'CGguL': 0.085, 'CGguU': 1.717, 'CGrgL':-0.080, 'CGrgU': 0.589, 'CGirL': 0.142, 'CGirU': 0.625, 'CGziL': 0.121, 'CGziU': 0.502}
#1 x sigma
#1.243181211 0.516802843 0.357449432 0.074183133
#0.870581826 0.209380322 0.137706511 -0.216456472
#CG_limits['K'] = {'CGguL': 0.870581826, 'CGguU': 1.243181211, 'CGrgL':0.209380322, 'CGrgU': 0.516802843, 'CGirL': 0.137706511, 'CGirU': 0.357449432, 'CGziL': -0.216456472, 'CGziU': 0.074183133}
#2x sigma
#1.429480904 0.670514103 0.467320892 0.219502936
#0.684282133 0.055669061 0.027835051 -0.361776275
CG_limits['K'] = {'CGguL': 0.684282133, 'CGguU': 1.429480904, 'CGrgL':0.055669061, 'CGrgU': 0.670514103, 'CGirL': 0.027835051, 'CGirU': 0.467320892, 'CGziL': -0.361776275, 'CGziU': 0.219502936}
#asteroid dictionary
asteroids = defaultdict(dict)
#===============================================================================
# 1 1 - 7 moID Unique SDSS moving-object ID
# 2 8 - 13 Run SDSS object IDs, for details see SDSS EDR paper
# 3 14 - 15 Col
# 4 16 - 20 Field
# 5 21 - 26 Object
# 6 27 - 35 rowc Pixel row
# 7 36 - 44 colc Pixel col
# -- Astrometry --
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 9 60 - 70 R.A. J2000 right ascension of the object at the time of the (r band) SDSS observation
# 10 71 - 81 Dec J2000 declination of the object at the time of the (r band) SDSS observation
# 11 82 - 92 Lambda Ecliptic longitude at the time of observation
# 12 93 - 103 Beta Ecliptic latitude at the time of observation
# 13 104 - 115 Phi Distance from the opposition at the time of observation
# 14 117 - 124 vMu The velocity component parallel to the SDSS scanning direction, and its error (deg/day)
# 15 125 - 131 vMu Error
# 16 132 - 139 vNu The velocity component perpendicular to the SDSS scanning direction, and its error (deg/day)
# 17 140 - 146 vNu Error
# 18 147 - 154 vLambda The velocity component parallel to the Ecliptic (deg/day)
# 19 155 - 162 vBeta The velocity component perpendicular to the Ecliptic (deg/day)
# -- Photometry --
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthetized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthetized from SDSS magnitudes
# -- Identification --
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 37 274 - 276 Detection Counter Detection counter of this object in SDSS data
# 38 277 - 279 Total Detection Count Total number of SDSS observations of this asteroid
# 39 280 - 288 Flags Flags that encode SDSSMOC processing information (internal)
# -- Matching information --
# 40 290 - 300 Computed R.A. Predicted position and magnitude at the time of SDSS observation for an associated known object computed using ASTORB data See a note about an error in the first three releases
# 41 301 - 311 Computed Dec
# 42 312 - 317 Computed App. Mag.
# 43 319 - 326 R Heliocentric distance at the time of observation
# 44 327 - 334 Geocentric Geocentric distance at the time of observation
# 45 335 - 340 Phase Phase angle at the time of observation
# -- Osculating elements --
# 46 342 - 352 Catalog ID Identification of the catalog from which the osculating elements and (H, G) values were extracted
# 47 363 - 368 H Absolute magnitude and slope parameter
# 48 369 - 373 G
# 49 374 - 379 Arc Arc of observations used to derive the elements
# 50 380 - 393 Epoch Osculating elements
# 51 394 - 406 a
# 52 407 - 417 e
# 53 418 - 428 i
# 54 429 - 439 Lon. of asc. node
# 55 440 - 450 Arg. of perihelion
# 56 451 - 461 M
# -- Proper elements --
# 57 463 - 483 Proper elements catalog ID Identification of the catalog from which the proper elements were extracted
# 58 484 - 496 a' Proper elements
# 59 497 - 507 e'
# 60 508 - 518 sin(i')
# 61-124 519 - 646 binary processing flags Only since the 3rd release!!
#===============================================================================
#using pandas with a column specification defined above
col_specification =[ (0, 6), (7, 12), (13, 14), (15, 19), (20, 25), (26, 34), (35, 43), (46, 58), (59, 69), (70, 80), (81, 91), (92, 102), (103, 114), (116, 123), (124, 130), (131, 138), (139, 145), (146, 153), (154, 161), (163, 168), (169, 173), (174, 179), (180, 184), (185, 190), (191, 195), (196, 201), (202, 206), (207, 212), (213, 217), (218, 223), (224, 228), (230, 235), (236, 241), (242, 243), (244, 251), (252, 272), (273, 275), (276, 278), (279, 287), (289, 299), (300, 310), (311, 316), (318, 325), (326, 333), (334, 339), (341, 351), (362, 367), (368, 372), (373, 378), (379, 392), (393, 405), (406, 416), (417, 427), (428, 438), (439, 449), (450, 460), (462, 482), (483, 495), (496, 506), (507, 517), (518, 645)]
print 'Reading SDSS MOC data from %s...'%path
#read all lines from MOC 4 data file
#variables to process big ole MOC4 data file
skipRows = 0
nRowsMax = 100000
nRows=nRowsMax
#is this a known moving object?
id_flag = 0
#track observation and unique asteroid count
asteroid_count = 0
observation_count = 0
#log files
log=open('moc4.log.txt', 'w')
log_tax=open('moc4.tax.txt', 'w')
log_tax_final=open('moc4.tax.final.txt', 'w')
#organize the observations by asteroid
observation={}
while nRows >= nRowsMax:
try:
data = pd.read_fwf(path, colspecs=col_specification, skiprows=skipRows, nrows=nRowsMax, header=None)
except:
break
nRows = data.shape[0]
for irow in range(0,nRows):
id_flag = data.iat[irow, 33]
#is this a known asteroid?
if id_flag == 1:
designation = data.iat[irow, 35]
if not asteroids.has_key(designation):
asteroids[designation]={}
asteroids[designation]['numeration'] = data.iat[irow, 34]
asteroids[designation]['observations'] = []
asteroid_count += 1
#add a new observation to this asteroid
observation={}
observation['moID'] = data.iat[irow, 0]
observation['mjd'] = float(data.iat[irow, 7])
observation['u'] = float(data.iat[irow, 19])
observation['uErr'] = float(data.iat[irow, 20])
observation['g'] = float(data.iat[irow, 21])
observation['gErr'] = float(data.iat[irow, 22])
observation['r'] = float(data.iat[irow, 23])
observation['rErr'] = float(data.iat[irow, 24])
observation['i'] = float(data.iat[irow, 25])
observation['iErr'] = float(data.iat[irow, 26])
observation['z'] = float(data.iat[irow, 27])
observation['zErr'] = float(data.iat[irow, 28])
observation['a'] = float(data.iat[irow, 29])
observation['aErr'] = float(data.iat[irow, 30])
observation['V'] = float(data.iat[irow, 31])
observation['B'] = float(data.iat[irow, 32])
observation['Phase'] = float(data.iat[irow, 44])
#print observation['moID'], observation['Phase']
#calc asteroid colors, relative to g-band and with solar color subtracted
#Cxg = mx - mg - (C(solar)x - C(solar)g)
observation['Cug'] = observation['u'] - observation['g'] - solar_color_ug
observation['Cgg'] = -solar_color_gg
observation['Crg'] = observation['r'] - observation['g'] - solar_color_rg
observation['Cig'] = observation['i'] - observation['g'] - solar_color_ig
observation['Czg'] = observation['z'] - observation['g'] - solar_color_zg
#calc asteroid color error
##propagate errors using quadrature, e.g. for Cug, error is sqrt(uErr*uErr+gErr*gErr)??
##observation['CugErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
##observation['CggErr'] = observation['gErr']
##observation['CrgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['rErr']*observation['rErr'])
##observation['CigErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['iErr']*observation['iErr'])
##observation['CzgErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['zErr']*observation['zErr'])
#from the Carvano data, this is what it seems they are doing
observation['CugErr'] = observation['uErr']
observation['CggErr'] = observation['gErr']
observation['CrgErr'] = observation['rErr']
observation['CigErr'] = observation['iErr']
observation['CzgErr'] = observation['zErr']
#calc asteroid log reflectance, relative to g-band
#Cxg = -2.5(logRx-logRg) = -2.5(log(Rx/Rg)) = -2.5*LRx
#LRx = LRxg = -Cxg/2.5
observation['LRug'] = -observation['Cug']/2.5
observation['LRgg'] = 1.0
observation['LRrg'] = -observation['Crg']/2.5
observation['LRig'] = -observation['Cig']/2.5
observation['LRzg'] = -observation['Czg']/2.5
#calc asteroid log reflectance errors by propagating the Cxg errors
observation['LRugErr'] = observation['CugErr']/2.5
observation['LRggErr'] = observation['CggErr']/2.5
observation['LRrgErr'] = observation['CrgErr']/2.5
observation['LRigErr'] = observation['CigErr']/2.5
observation['LRzgErr'] = observation['CzgErr']/2.5
#calc asteroid color gradients, basis of Carvano taxonomy
#CGx = -0.4*(Cxg-C(x-1)g)/(lambdax-lambda(x-1))
observation['CGgu'] = -0.4*(observation['Cgg']-observation['Cug'])/(g_wavelength-u_wavelength)
observation['CGrg'] = -0.4*(observation['Crg']-observation['Cgg'])/(r_wavelength-g_wavelength)
observation['CGir'] = -0.4*(observation['Cig']-observation['Crg'])/(i_wavelength-r_wavelength)
observation['CGzi'] = -0.4*(observation['Czg']-observation['Cig'])/(z_wavelength-i_wavelength)
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])
#observation['CGguErr'] = observation['gErr'] + observation['uErr']
#observation['CGrgErr'] = observation['rErr'] + observation['gErr']
#observation['CGirErr'] = observation['iErr'] + observation['rErr']
#observation['CGziErr'] = observation['zErr'] + observation['iErr']
#observation['CGguErr'] = math.sqrt(observation['gErr']*observation['gErr']+observation['uErr']*observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = math.sqrt(observation['rErr']*observation['rErr']+observation['gErr']*observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = math.sqrt(observation['iErr']*observation['iErr']+observation['rErr']*observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = math.sqrt(observation['zErr']*observation['zErr']+observation['iErr']*observation['iErr'])*0.4/(z_wavelength-i_wavelength)
observation['CGguErr'] = math.sqrt(observation['LRggErr']*observation['LRggErr']+observation['LRugErr']*observation['LRugErr'])/(g_wavelength-u_wavelength)
observation['CGrgErr'] = math.sqrt(observation['LRrgErr']*observation['LRrgErr']+observation['LRggErr']*observation['LRggErr'])/(r_wavelength-g_wavelength)
observation['CGirErr'] = math.sqrt(observation['LRigErr']*observation['LRigErr']+observation['LRrgErr']*observation['LRrgErr'])/(i_wavelength-r_wavelength)
observation['CGziErr'] = math.sqrt(observation['LRzgErr']*observation['LRzgErr']+observation['LRigErr']*observation['LRigErr'])/(z_wavelength-i_wavelength)
#observation['CGguErr'] = (observation['gErr']+observation['uErr'])*0.4/(g_wavelength-u_wavelength)
#observation['CGrgErr'] = (observation['rErr']+observation['gErr'])*0.4/(r_wavelength-g_wavelength)
#observation['CGirErr'] = (observation['iErr']+observation['rErr'])*0.4/(i_wavelength-r_wavelength)
#observation['CGziErr'] = (observation['zErr']+observation['iErr'])*0.4/(z_wavelength-i_wavelength)
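            # Illustrative check of the gradient formula (example numbers only):
            # with Cug = -2.2 and Cgg = -2.5, CGgu = -0.4*(-2.5 - (-2.2)) /
            # (0.4770 - 0.3543) = 0.12/0.1227 ~= 0.98, which falls inside, e.g.,
            # the S-class CGgu window [0.868, 1.960] defined above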
#
#this is for phase angle analysis (Carvano et al. 2015)
#color gradients based on r'
observation['Cur'] = observation['u'] - observation['r'] - solar_color_ur
observation['Cgr'] = observation['g'] - observation['r'] - solar_color_gr
observation['Crr'] = 0.0 #-solar_color_rr
observation['Cir'] = observation['i'] - observation['r'] - solar_color_ir
observation['Czr'] = observation['z'] - observation['r'] - solar_color_zr
#from the Carvano data, this is what it seems they are doing
observation['CurErr'] = observation['uErr']
observation['CgrErr'] = observation['gErr']
observation['CrrErr'] = observation['rErr']
observation['CirErr'] = observation['iErr']
observation['CzrErr'] = observation['zErr']
#calc asteroid reflectance, relative to r-band
#Cxr = -2.5(logRx-logRr) = -2.5(log(Rx/Rr))
#Rx/Rr = 10^(-Cxr/2.5)
observation['Rur'] = pow(10,-observation['Cur']/2.5)
observation['Rgr'] = pow(10, -observation['Cgr']/2.5)
observation['Rrr'] = 1.0
observation['Rir'] = pow(10, -observation['Cir']/2.5)
observation['Rzr'] = pow(10, -observation['Czr']/2.5)
#calc slope and bd parameters from Carvano et al. 2015
#eq 1: Rir-Rgr/(lambdai-lambdag)
#eq 2: Rzr-Rir
observation['slope'] = (observation['Rir']-observation['Rgr'])/(i_wavelength-g_wavelength)
observation['bd'] = observation['Rzr'] - observation['Rir']
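            # Illustrative check (example numbers only): for Rgr = 0.9, Rir = 1.1
            # and Rzr = 1.05, slope = (1.1 - 0.9)/(0.7625 - 0.4770) ~= 0.70 per
            # micron and bd = 1.05 - 1.1 = -0.05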
#calc asteroid log reflectance errors by propagating the Cxg errors
#observation['RurErr'] = ?
#observation['RgrErr'] = ?
#observation['RrrErr'] = ?
#observation['RirErr'] = ?
#observation['RzrErr'] = ?
#
asteroids[designation]['observations'].append(observation)
#print asteroids[designation]
skipRows += nRows
print 'Read %d row(s).'%(skipRows)
print 'Found %d asteroid(s).'%asteroid_count
print 'Calculating taxonomic classes for each observation...'
log_tax.write('%s,%s,%s,%s,%s,%s,%s,%s\n'%('designation', 'moid', 'phase', 'slope', 'bd', 'class', 'score', 'type'))
for designation in asteroids:
log.write('%s\n'%designation)
print 'Processing observations for %s...'%designation
for observation in asteroids[designation]['observations']:
log.write('\t%s\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n'%('moID', 'LRug', 'LRugErr', 'LRgg', 'LRggErr', 'LRrg', 'LRrgErr', 'LRig', 'LRigErr', 'LRzg', 'LRzgErr'))
log.write('\t%s\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n'%(observation['moID'], observation['LRug'], observation['LRugErr'], observation['LRgg'], observation['LRggErr'], observation['LRrg'], observation['LRrgErr'], observation['LRig'], observation['LRigErr'], observation['LRzg'], observation['LRzgErr']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGgu', 'CGrg', 'CGir', 'CGzi'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGgu'], observation['CGrg'], observation['CGir'], observation['CGzi']))
log.write('\t%s\t\t%s\t\t%s\t\t%s\n'%('CGguErr', 'CGrgErr', 'CGirErr', 'CGziErr'))
log.write('\t%f\t%f\t%f\t%f\n'%(observation['CGguErr'], observation['CGrgErr'], observation['CGirErr'], observation['CGziErr']))
#for this observation, loop through the limits for each taxonomic type
CG_cdf={}
CG_cdf_sum = 0
log.write('\t%s\t%s\t\t%s\t\t%s\t\t%s\t\t%s\n'%('tax', 'score', 'scoregu', 'scorerg', 'scoreir', 'scorezi'))
for taxclass in CG_limits:
CGgu_cdf = CGrg_cdf = CGir_cdf = CGzi_cdf = 0.0
#create normal probability density functions for each color gradient, CG; mean is CG value and stddev is error; cdf = cumulative density function
if observation['CGguErr'] > 0:
CGgu_cdf = stats.norm.cdf(CG_limits[taxclass]['CGguU'], loc=observation['CGgu'], scale=observation['CGguErr'])-stats.norm.cdf(CG_limits[taxclass]['CGguL'], loc=observation['CGgu'], scale=observation['CGguErr'])
#print observation['CGgu'], observation['CGguErr'], CG_limits[taxclass]['CGguL'], CG_limits[taxclass]['CGguU'], stats.norm.cdf(CG_limits[taxclass]['CGguL'], loc=observation['CGgu'], scale=observation['CGguErr']), stats.norm.cdf(CG_limits[taxclass]['CGguU'], loc=observation['CGgu'], scale=observation['CGguErr'])
if observation['CGrgErr'] > 0:
CGrg_cdf = stats.norm.cdf(CG_limits[taxclass]['CGrgU'], loc=observation['CGrg'], scale=observation['CGrgErr'])-stats.norm.cdf(CG_limits[taxclass]['CGrgL'], loc=observation['CGrg'], scale=observation['CGrgErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGrgU'], loc=observation['CGrg'], scale=observation['CGrgErr']), stats.norm.cdf(CG_limits[taxclass]['CGrgL'], loc=observation['CGrg'], scale=observation['CGrgErr'])
if observation['CGirErr'] > 0:
CGir_cdf = stats.norm.cdf(CG_limits[taxclass]['CGirU'], loc=observation['CGir'], scale=observation['CGirErr'])-stats.norm.cdf(CG_limits[taxclass]['CGirL'], loc=observation['CGir'], scale=observation['CGirErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGirU'], loc=observation['CGir'], scale=observation['CGirErr']), stats.norm.cdf(CG_limits[taxclass]['CGirL'], loc=observation['CGir'], scale=observation['CGirErr'])
if observation['CGziErr'] > 0:
CGzi_cdf = stats.norm.cdf(CG_limits[taxclass]['CGziU'], loc=observation['CGzi'], scale=observation['CGziErr'])-stats.norm.cdf(CG_limits[taxclass]['CGziL'], loc=observation['CGzi'], scale=observation['CGziErr'])
#print stats.norm.cdf(CG_limits[taxclass]['CGziU'], loc=observation['CGzi'], scale=observation['CGziErr']), stats.norm.cdf(CG_limits[taxclass]['CGziL'], loc=observation['CGzi'], scale=observation['CGziErr'])
CG_cdf[taxclass] = CGgu_cdf * CGrg_cdf * CGir_cdf * CGzi_cdf
CG_cdf_sum += CG_cdf[taxclass]
log.write('\t%s\t%f\t%f\t%f\t%f\t%f\n'%(taxclass, CG_cdf[taxclass], CGgu_cdf, CGrg_cdf, CGir_cdf, CGzi_cdf))
#plt.text(0, 0, '%s\t%s'%(observation['moID'],taxclass))
#uncomment to show plots!
#plt.show()
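        # The per-class score above is the joint probability (the four window
        # probabilities multiplied, treated as independent) that all four color
        # gradients fall inside that class's limits, assuming each gradient is
        # normally distributed about its measured value with the propagated
        # error as standard deviation; classes are then compared by their share
        # of the summed scores (>= 60% for a single-class assignment below, or
        # >= 30% each for a combined class)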
CG_cdf_max = 0.0
CG_cdf_max_taxclass = ''
log.write('\t%s\t%s\ttype\n'%('tax', '%score'))
for taxclass in CG_cdf:
if CG_cdf[taxclass] > CG_cdf_max:
CG_cdf_max_taxclass = taxclass
CG_cdf_max = CG_cdf[taxclass]
#print taxclass, CG_cdf[taxclass]/CG_cdf_sum*100
try:
if CG_cdf_sum > 0:
log.write('\t%s\t%f\n'%(taxclass, (CG_cdf[taxclass]/CG_cdf_sum*100)))
else:
log.write('\t%s\t%f\n'%(taxclass, 0.0))
except:
log.write('ERROR')
if CG_cdf_sum > 0 and CG_cdf_max/CG_cdf_sum >= 0.6:
#CGzi_ave = (CG_limits[CG_cdf_max_taxclass]['CGziU']+CG_limits[CG_cdf_max_taxclass]['CGziL'])/2.0
log_tax.write('%s,%s,%s,%f,%f,%s,%f,single\n'%(designation, observation['moID'], observation['Phase'], observation['slope'], observation['bd'], CG_cdf_max_taxclass, CG_cdf_max))
log.write('\t%s\t%s\n'%('tax', 'score'))
log.write('\t%s\t%f\n'%(CG_cdf_max_taxclass, CG_cdf_max))
#save final tax and score
observation['class'] = CG_cdf_max_taxclass
observation['score'] = CG_cdf_max
else:
comboclass = ''
combocount = 0
comboscoresum = 0.0
comboscore = 0.0
CGzi_ave = 0.0
for taxclass in CG_cdf:
if CG_cdf_sum > 0 and CG_cdf[taxclass]/CG_cdf_sum >= 0.3:
comboclass += taxclass
combocount += 1
comboscoresum += CG_cdf[taxclass]
CGzi_ave += (CG_limits[taxclass]['CGziU']+CG_limits[taxclass]['CGziL'])/2.0
if combocount > 0:
comboscore = comboscoresum/combocount
CGzi_ave = CGzi_ave/combocount
else:
comboclass = 'U'
log_tax.write('%s,%s,%s,%f,%f,%s,%f,combo\n'%(designation, observation['moID'], observation['Phase'], observation['slope'], observation['bd'], comboclass, comboscore))
#log_tax.write('%s\t%s\t%s\t%s\t%f\tcombo\n'%(designation, observation['moID'], observation['Phase'], comboclass, comboscore))
log.write('\tcombo\n')
log.write('\t%s\t%s\n'%('tax', 'score'))
log.write('\t%s\t%f\n'%(comboclass, comboscore))
#save final tax and score
observation['class'] = comboclass
observation['score'] = comboscore
log.write('\t***************************************\n')
#create dictionary to hold asteroid taxonomy counts and high scores
#include U class too
tax_classes = {}
for key in CG_limits:
tax_classes[key] = {}
tax_classes['U'] = {}
print 'Calculating final taxonomic classes for each asteroid...'
for designation in asteroids:
#init this asteroid's counts and high scores
for key in tax_classes:
tax_classes[key]['count'] = 0
tax_classes[key]['high_score'] = 0.0
pearson_rank_slope = None
pearson_rank_bd = None
if len(asteroids[designation]['observations']) > 2:
phase = []
slope = []
bd = []
for observation in asteroids[designation]['observations']:
phase.append(observation['Phase'])
slope.append(observation['slope'])
bd.append(observation['bd'])
#pearson_rank_slope = stats.pearsonr(phase, slope)
#pearson_rank_bd = stats.pearsonr(phase, bd)
#print pearson_rank_slope, pearson_rank_bd
for observation in asteroids[designation]['observations']:
for tax_class in observation['class']:
tax_classes[tax_class]['count'] += 1
if observation['score'] > tax_classes[tax_class]['high_score']:
tax_classes[tax_class]['high_score'] = observation['score']
#print designation, observation['class'], tax_classes
max_count = 0
for key in tax_classes:
#print key, tax_classes[key]
if tax_classes[key]['count'] > max_count:
max_count = tax_classes[key]['count']
#print max_count
max_high_score = 0
final_tax_class = ''
for key in tax_classes:
if tax_classes[key]['count'] == max_count:
final_tax_class += key
if tax_classes[key]['high_score'] > max_high_score:
max_high_score = tax_classes[key]['high_score']
log_tax_final.write('%s\t%s\t%f\n'%(designation, final_tax_class, max_high_score))
log.close()
log_tax.close()
log_tax_final.close()
# 1 1 - 7 moID Unique SDSS moving-object ID
# 8 47 - 59 Time (MJD) Modified Julian Day for the mean observation time
# 34 243 - 244 Identification flag Has this moving object been linked to a known asteroid (0/1)? See Paper II.
# 35 245 - 252 Numeration Numeration of the asteroid. If the asteroid is not numbered, or this moving object has not yet been linked to a known asteroid, it's 0.
# 36 253 - 273 Designation Asteroid designation or name. If this moving object has not yet been linked to a known asteroid, it's '-'
# 20 164 - 169 u SDSS u'g'r'i'z' psf magnitudes and corresponding errors
# 21 170 - 174 uErr
# 22 175 - 180 g
# 23 181 - 185 gErr
# 24 186 - 191 r
# 25 192 - 196 rErr
# 26 197 - 202 i
# 27 203 - 207 iErr
# 28 208 - 213 z
# 29 214 - 218 zErr
# 30 219 - 224 a a* color = 0.89 (g - r) + 0.45 (r - i) - 0.57 (see Paper I)
# 31 225 - 229 aErr
# 32 231 - 236 V Johnson-V band magnitude, synthetized from SDSS magnitudes
# 33 237 - 242 B Johnson-B band magnitude, synthetized from SDSS magnitudes
| mit |
justinbois/fish-activity | fishact/visualize.py | 1 | 14336 | import numpy as np
import pandas as pd
from . import parse
import tsplot
def ecdf(data, formal=False, buff=0.1, min_x=None, max_x=None):
"""
Generate `x` and `y` values for plotting an ECDF.
Parameters
----------
data : array_like
Array of data to be plotted as an ECDF.
formal : bool, default False
If True, generate `x` and `y` values for formal ECDF.
Otherwise, generate `x` and `y` values for "dot" style ECDF.
buff : float, default 0.1
How long the tails at y = 0 and y = 1 should extend as a
fraction of the total range of the data. Ignored if
`formal` is False.
min_x : float, default None
Minimum value of `x` to include on plot. Overrides `buff`.
Ignored if `formal` is False.
max_x : float, default None
Maximum value of `x` to include on plot. Overrides `buff`.
Ignored if `formal` is False.
Returns
-------
x : array
`x` values for plotting
y : array
`y` values for plotting
"""
if formal:
return _ecdf_formal(data, buff=buff, min_x=min_x, max_x=max_x)
else:
return _ecdf_dots(data)
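# Illustrative usage sketch (assumed call pattern; the values shown are worked
# out from the helpers below and are not part of this module):
#
#     data = np.array([1.0, 2.0, 4.0])
#     x, y = ecdf(data)                 # x = [1, 2, 4], y = [1/3, 2/3, 1]
#     xf, yf = ecdf(data, formal=True)  # staircase values with tails at y=0, y=1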
def _ecdf_dots(data):
"""
Compute `x` and `y` values for plotting an ECDF.
Parameters
----------
data : array_like
Array of data to be plotted as an ECDF.
Returns
-------
x : array
`x` values for plotting
y : array
`y` values for plotting
"""
return np.sort(data), np.arange(1, len(data)+1) / len(data)
def _ecdf_formal(data, buff=0.1, min_x=None, max_x=None):
"""
Generate `x` and `y` values for plotting a formal ECDF.
Parameters
----------
data : array_like
Array of data to be plotted as an ECDF.
buff : float, default 0.1
How long the tails at y = 0 and y = 1 should extend as a fraction
of the total range of the data.
min_x : float, default None
Minimum value of `x` to include on plot. Overrides `buff`.
max_x : float, default None
Maximum value of `x` to include on plot. Overrides `buff`.
Returns
-------
x : array
`x` values for plotting
y : array
`y` values for plotting
"""
# Get x and y values for data points
x, y = _ecdf_dots(data)
# Set defaults for min and max tails
if min_x is None:
min_x = x[0] - (x[-1] - x[0])*buff
if max_x is None:
max_x = x[-1] + (x[-1] - x[0])*buff
# Set up output arrays
x_formal = np.empty(2*(len(x) + 1))
y_formal = np.empty(2*(len(x) + 1))
# y-values for steps
y_formal[:2] = 0
y_formal[2::2] = y
y_formal[3::2] = y
# x- values for steps
x_formal[0] = min_x
x_formal[1] = x[0]
x_formal[2::2] = x
x_formal[3:-1:2] = x[1:]
x_formal[-1] = max_x
return x_formal, y_formal
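# For the same illustrative data [1.0, 2.0, 4.0], _ecdf_formal (with the default
# buff=0.1, so min_x=0.7 and max_x=4.3) interleaves the dot values into
# x = [0.7, 1, 1, 2, 2, 4, 4, 4.3] and y = [0, 0, 1/3, 1/3, 2/3, 2/3, 1, 1],
# i.e. the horizontal segments of the staircase.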
def get_y_axis_label(df, signal, loc_name='location', time_unit=None):
"""
Generate y-label for visualizations.
Parameters
----------
df : pandas DataFrame
Tidy DataFrame as loaded from parse.load_data() or returned
from parse.resample().
signal : string
String for what is on the y-axis
loc_name : str, default 'location'
Name of column containing the "location," i.e., animal location.
'fish' is a common entry.
time_unit : str, default None
Time unit for time axis label. By default, if `signal` is
'sleep', `time_unit` is 'min.', and if signal is 'activity',
`time_unit` is 'sec.'.
Returns
-------
output : string
y-axis label.
"""
if time_unit is None:
if signal == 'sleep':
time_unit = 'min.'
elif signal == 'activity':
time_unit = 'sec.'
# Get approximate time interval of averages
inds = df[loc_name]==df[loc_name].unique()[0]
zeit = np.sort(df.loc[inds, 'zeit'].values)
dt = np.median(np.diff(zeit)) * 60
# Make y-axis label
if 0.05 <= abs(dt - int(dt)) <= 0.95:
return '{0:s} of {1:s} in {2:.2f} min.'.format(time_unit, signal, dt)
else:
return '{0:s} of {1:s} in {2:d} min.'.format(time_unit, signal,
int(np.round(dt)))
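# Example of the label produced above (illustrative): for signal='sleep' and
# data binned every 10 minutes (zeit spaced 1/6 hr apart), dt evaluates to 10.0
# and the returned label is 'min. of sleep in 10 min.'.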
def all_traces(df, signal='activity', summary_trace='mean',
               loc_name='location', time_shift='center',
alpha=0.75, hover_color='#535353', height=350, width=650,
colors=None):
"""
Generate a set of plots for each genotype.
Parameters
----------
df : pandas DataFrame
Tidy DataFrame as loaded from parse.load_data() or returned
from parse.resample().
signal : string, default 'activity'
Column of `df` that is used for the y-values in the plot.
summary_trace : string, float, or None, default 'mean'
Which summary statistic to use to make summary trace. If a
string, can one of 'mean', 'median', 'max', or 'min'. If
None, no summary trace is generated. If a float between
0 and 1, denotes which quantile to show.
loc_name : str, default 'location'
Name of column containing the "location," i.e., animal location.
'fish' is a common entry.
    time_shift : string, default 'center'
One of {'left', 'right', 'center', 'interval'}
left: do not perform a time shift
right: Align time points to right edge of interval
center: Align time points to the center of the interval
interval: Plot the signal as a horizontal line segment
over the time interval
alpha : float, default 0.75
alpha value for individual time traces
hover_color : string, default '#535353'
Hex value for color when hovering over a curve
    height : int, default 350
        Height of each subplot in pixels.
width : int, default 650
Width of each subplot in pixels.
colors : dict, default None
        colors[cat] is a 2-list containing, for category `cat`:
colors[cat][0]: hex value for color of all time series
colors[cat][1]: hex value for color of summary trace
        If None, colors are generated using paired ColorBrewer colors,
with a maximum of six categories.
Returns
-------
output : Bokeh grid plot
Bokeh figure with subplots of all time series
"""
# Make y-axis label
y_axis_label = get_y_axis_label(df, signal)
# Get all instrument/trial pairs
inst_trial = parse.instrument_trial_pairs(df)
if len(inst_trial) == 1:
df_in = df
else:
# Be sure it is sorted by zeit
df_in = df.sort_values(by=['zeit', 'instrument', 'trial', loc_name])
# Convert location name to instrument/trial/location tuple
df_in['new_loc_name'] = [(r['instrument'], r['trial'], r[loc_name])
for _, r in df_in.iterrows()]
loc_name = 'new_loc_name'
# Make plots
    p = tsplot.all_traces(
            df_in, 'zeit', signal, loc_name, time_ind='zeit_ind',
            light='light', summary_trace=summary_trace, time_shift=time_shift,
            alpha=alpha, x_axis_label='time (hr)', y_axis_label=y_axis_label)
return p
def grid(df, signal='activity', summary_trace='mean', loc_name='location',
gtype_order=None, time_shift='center', alpha=0.75,
hover_color='#535353', height=200, width=650, colors=None):
"""
Generate a set of plots for each genotype.
Parameters
----------
df : pandas DataFrame
Tidy DataFrame as loaded from parse.load_data() or returned
from parse.resample().
signal : string, default 'activity'
Column of `df` that is used for the y-values in the plot.
summary_trace : string, float, or None, default 'mean'
Which summary statistic to use to make summary trace. If a
        string, can be one of 'mean', 'median', 'max', or 'min'. If
None, no summary trace is generated. If a float between
0 and 1, denotes which quantile to show.
loc_name : str, default 'location'
Name of column containing the "location," i.e., animal location.
'fish' is a common entry.
gtype_order : list or tuple, default None
A list of the order of the genotypes to use in the plots. Each
entry must be in df['genotype']. If None,
df['genotype'].unique() is used.
    time_shift : string, default 'center'
One of {'left', 'right', 'center', 'interval'}
left: do not perform a time shift
right: Align time points to right edge of interval
center: Align time points to the center of the interval
interval: Plot the signal as a horizontal line segment
over the time interval
alpha : float, default 0.75
alpha value for individual time traces
hover_color : string, default '#535353'
Hex value for color when hovering over a curve
height : int, default 200
        Height of each subplot in pixels.
width : int, default 650
Width of each subplot in pixels.
colors : dict, default None
        colors[cat] is a 2-list containing, for category `cat`:
colors[cat][0]: hex value for color of all time series
colors[cat][1]: hex value for color of summary trace
        If None, colors are generated using paired ColorBrewer colors,
with a maximum of six categories.
Returns
-------
output : Bokeh grid plot
Bokeh figure with subplots of all time series
"""
# Make y-axis label
y_axis_label = get_y_axis_label(df, signal)
# Get all instrument/trial pairs
inst_trial = parse.instrument_trial_pairs(df)
if len(inst_trial) == 1:
df_in = df
else:
# Be sure it is sorted by zeit
df_in = df.sort_values(by=['zeit', 'instrument', 'trial', loc_name])
# Convert location name to instrument/trial/location tuple
df_in['new_loc_name'] = [(r['instrument'], r['trial'], r[loc_name])
for _, r in df_in.iterrows()]
loc_name = 'new_loc_name'
# Make plots
p = tsplot.grid(
df_in, 'zeit', signal, 'genotype', loc_name, cats=gtype_order,
time_ind='zeit_ind', light='light', summary_trace=summary_trace,
time_shift=time_shift, height=height, width=width,
x_axis_label='time (hr)', y_axis_label=y_axis_label, colors=colors)
return p
def summary(df, signal='activity', summary_trace='mean', loc_name='location',
gtype_order=None, time_shift='center', confint=True,
ptiles=(2.5, 97.5), n_bs_reps=1000, alpha=0.35, height=350,
width=650, colors=None, legend=True):
"""
Generate a summary plot of the time courses.
Parameters
----------
df : pandas DataFrame
Tidy DataFrame as loaded from parse.load_data() or returned
from parse.resample().
signal : string, default 'activity'
Column of `df` that is used for the y-values in the plot.
summary_trace : string, float, or None, default 'mean'
Which summary statistic to use to make summary trace. If a
        string, can be one of 'mean', 'median', 'max', or 'min'. If
None, no summary trace is generated. If a float between
0 and 1, denotes which quantile to show.
loc_name : str, default 'location'
Name of column containing the "location," i.e., animal location.
'fish' is a common entry.
gtype_order : list or tuple, default None
A list of the order of the genotypes to use in the plots. Each
entry must be in df['genotype']. If None,
df['genotype'].unique() is used.
    time_shift : string, default 'center'
One of {'left', 'right', 'center', 'interval'}
left: do not perform a time shift
right: Align time points to right edge of interval
center: Align time points to the center of the interval
interval: Plot the signal as a horizontal line segment
over the time interval
confint : bool, default True
If True, also display confidence interval.
ptiles : list or tuple of length two, default (2.5, 97.5)
Percentiles for confidence intervals; ignored if
`confint` is False.
n_bs_reps : int, default 1000
Number of bootstrap replicates to use in conf. int. Ignored if
`confint` is False.
    alpha : float, default 0.35
        alpha value for individual time traces
    height : int, default 350
        Height of the plot in pixels.
width : int, default 650
Width of each subplot in pixels.
colors : dict, default None
        colors[cat] is a 2-list containing, for category `cat`:
colors[cat][0]: hex value for color of all time series
colors[cat][1]: hex value for color of summary trace
        If None, colors are generated using paired ColorBrewer colors,
with a maximum of six categories.
legend : bool, default True
If True, show legend.
Returns
-------
    output : Bokeh plot
Bokeh figure with summary plots
"""
# Make y-axis label
y_axis_label = get_y_axis_label(df, signal)
# Get all instrument/trial pairs
inst_trial = parse.instrument_trial_pairs(df)
if len(inst_trial) == 1:
df_in = df
else:
# Be sure it is sorted by zeit
df_in = df.sort_values(by=['zeit', 'instrument', 'trial', loc_name])
# Convert location name to instrument/trial/location tuple
df_in['new_loc_name'] = [(r['instrument'], r['trial'], r[loc_name])
for _, r in df_in.iterrows()]
loc_name = 'new_loc_name'
p = tsplot.summary(
df_in, 'zeit', signal, 'genotype', loc_name, cats=gtype_order,
time_ind='zeit_ind', light='light', summary_trace=summary_trace,
time_shift=time_shift, confint=confint, ptiles=ptiles,
        n_bs_reps=n_bs_reps, alpha=alpha, height=height, width=width,
x_axis_label='time (hr)', y_axis_label=y_axis_label,
colors=colors, legend=legend)
return p
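# Typical call pattern for the three plotting entry points above (illustrative
# sketch only; the data file name is hypothetical, and displaying the returned
# figures with bokeh.io.show is an assumption about how callers use them):
#
#   import bokeh.io
#   df = parse.load_data('my_experiment.csv')         # tidy activity data
#   bokeh.io.show(all_traces(df, signal='activity'))  # every individual trace
#   bokeh.io.show(grid(df, signal='sleep'))           # one subplot per genotype
#   bokeh.io.show(summary(df, signal='sleep'))        # summary trace + conf. int.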
| mit |
simonjwbond/MarketModel | MarketClearing.py | 1 | 12872 | import numpy as np
import pandas as pd
import math
import multiprocessing
import copy
import datetime
import time
class MarketClearing:
def __init__(self,inHours, inMaxPrice, inMinPrice, inBucketType, inBucketSize, inBidCollection):
self.Hours = inHours
self.MaxPrice = inMaxPrice
self.MinPrice = inMinPrice
self.BucketType = inBucketType
self.BucketSize = inBucketSize
self.BidCollection = inBidCollection
self.BidStack = np.zeros([self.MaxPrice-self.MinPrice+1,self.Hours],dtype=np.float64)
self.OfferStack = np.zeros([self.MaxPrice-self.MinPrice+1,self.Hours],dtype=np.float64)
self.BidCurve = np.zeros([self.MaxPrice-self.MinPrice+1,self.Hours],dtype=np.float64)
self.OfferCurve = np.zeros([self.MaxPrice-self.MinPrice+1,self.Hours],dtype=np.float64)
self.BidOfferCurve = np.zeros([self.MaxPrice-self.MinPrice+1,self.Hours],dtype=np.float64)
self.InitialMCP = np.zeros(self.Hours)
self.PriceIndex = np.asmatrix(range(self.MinPrice,self.MaxPrice+1))
print("Initialised MarketClearing Object")
self.MakeBidOfferCurve()
self.MakeSurplusRanges()
self.MaximizeSurplus()
def MaximizeSurplus(self):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
threads = multiprocessing.cpu_count()
threads = 1
logevery = 10000
combis = 2 ** len(self.BidCollection.BlockOffers)
perthread = int(combis/threads)
if(threads > 1):
jobs = []
pipe_list = []
i = 0
for i in range(0,threads-1):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=self.MaximiseSurplusProcess, args=(i*perthread,i*perthread+perthread,len(self.BidCollection.BlockOffers),i,logevery,send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=self.MaximiseSurplusProcess, args=((i+1) * perthread, combis, len(self.BidCollection.BlockOffers),i+1, logevery,send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for i in jobs:
i.join()
result_list = [x.recv() for x in pipe_list]
Best_Result = 0
Best_Index = 0
for i in range(0, len(result_list)):
if Best_Result < result_list[i][1]:
Best_Result = result_list[i][1]
Best_Index = i
print(" ")
print("Calculated Optimum")
print("Final Optimal MCP: \n ")
print(result_list[Best_Index][2])
print("Accepted blocks: " + str(bin(result_list[Best_Index][0]))[2:].zfill(len(self.BidCollection.BlockOffers)))
#get the optimals from each process
else:
recv_end, send_end = multiprocessing.Pipe(False)
result_list = self.MaximiseSurplusProcess(0, combis, len(self.BidCollection.BlockOffers),0, logevery,send_end)
ts2 = time.time()
et = datetime.datetime.fromtimestamp(ts2-ts).strftime('%Y-%m-%d %H:%M:%S')
print (et)
def MaximiseSurplusProcess(self,Start,End, length, Worker, logevery, send_end):
MaxSurplus = 0
BestOffers = 0
for i in range(Start,End):
self.BidOfferCurve = np.zeros([self.MaxPrice - self.MinPrice + 1, self.Hours], dtype=np.float64)
bitstr = str(bin(i))[2:].zfill(length)
offersaccepted = np.asarray(list(map(int, list(bitstr)))).flatten()
BlocksAccepted = (offersaccepted[:, np.newaxis] * self.BidCollection.BlockOfferVolumes).sum(axis=0)
OfferCurve = self.ShiftOffers(BlocksAccepted)
BidOfferCurve = OfferCurve - self.BidCurve
MCP = self.FindMCP(BidOfferCurve)
Surplus = self.CalculateSurplus(MCP,BlocksAccepted,offersaccepted)
#TotSurplus = sum(Surplus)
if(Surplus>MaxSurplus):
MaxSurplus = Surplus
BestOffers = i
BestMCP = copy.deepcopy(MCP)
if(i% logevery == 0):
print(str(i)+ " of " + str(End) + " Worker: " + str(Worker))
print(str(BestOffers) + " - Surplus: " + str(MaxSurplus))
print(str(bin(BestOffers))[2:].zfill(length))
print(BestMCP)
print(" ")
print("Calculated Optimum:")
print(MaxSurplus)
print("Optimum MCP: \n ")
print(BestMCP)
print("Accepted blocks: " +str(BestOffers) + ", " + str(bin(BestOffers))[2:].zfill(len(self.BidCollection.BlockOffers)))
result = [BestOffers,MaxSurplus,BestMCP]
send_end.send(result)
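    # Worked example of the bit-string encoding used in MaximiseSurplusProcess
    # above (illustrative comment only): with three block offers, combination
    # index i = 5 gives bin(5) -> '101', zero-filled to length 3, so offers 0
    # and 2 are accepted and offer 1 is rejected. The accepted block volumes
    # are summed per hour, added on top of the hourly offer curve, and only
    # then is the market clearing price searched for.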
def CalculateSurplus(self,MCP,Blocks,offersaccepted):
#surplus2 = []
# the additional consumer surplus on the left
        #scratch that: there's no surplus on the left because it's pay-as-bid
#surplus = Blocks * MCP
surplus = np.zeros(MCP.shape)
#And add each PS below
for h in range(0, self.Hours):
tmpVal = 0
i=0
            #you should review the calculation of the surplus; the index here for PSEnd may need to be PSStart, and also check the i index
while MCP[h] > self.PSEnd[h][i]:
i+=1
surplus[h] += self.PS[h][i-1]
#surplus2.append() = list(map(sum, self.PS))
surplus = sum(surplus)
NegSurplus=0
#now for each block, the difference between the settled price and the block price, multiplied by the volume
for i in range(len(offersaccepted)):
if(offersaccepted[i]):
#sumproduct of volume and mcp vs sumproduct of block price
MarketValue = self.BidCollection.BlockOffers[i].Volume * MCP
BlockValue = self.BidCollection.BlockOffers[i].Volume * self.BidCollection.BlockOffers[i].Price
NegSurplus += sum(MarketValue-BlockValue)
return surplus + NegSurplus
def ShiftOffers(self,inBlocksAccepted):
OfferCurve = copy.deepcopy(self.OfferCurve)
for i in range(0,len(OfferCurve)):
OfferCurve[i] += inBlocksAccepted
return OfferCurve
def MakeSurplusRanges(self):
self.SetProducerSurplusRange()
self.SetConsumerSurplusRange()
print("Made Surplus Ranges")
def SetProducerSurplusRange(self):
self.PSStart = []
self.PSEnd = []
self.PSLength = []
self.PSMarginal = []
self.PS = []
for p in range(0, self.Hours):
self.PSStart.append([])
self.PSEnd.append([])
self.PSLength.append([])
self.PSMarginal.append([])
self.PS.append([])
i = self.MinPrice
while (i < self.MaxPrice):
tmpVol = self.OfferCurve[i][p]
self.PSStart[p].append(i)
while (i < self.MaxPrice and self.OfferCurve[i][p] == tmpVol):
i += 1
totVol = self.OfferCurve[i][p] - tmpVol
self.PSEnd[p].append(i-1)
self.PSLength[p].append(self.PSEnd[p][-1] - self.PSStart[p][-1])
self.PSMarginal[p].append(self.PSLength[p][-1]*totVol)
self.PS[p].append(self.PSLength[p][-1]*totVol)
for j in range(0, len(self.PSMarginal[p])-1):
self.PS[p][-1] += self.PSMarginal[p][j]
print("Made PS Range")
def SetConsumerSurplusRange(self):
#work on this, make sure calculation of Consumer surplus is correct
self.CSStart = []
self.CSEnd = []
self.CSLength = []
self.CSMarginal = []
self.CS = []
for p in range(0, self.Hours):
self.CSStart.append([])
self.CSEnd.append([])
self.CSLength.append([])
self.CSMarginal.append([])
self.CS.append([])
i = self.MaxPrice
while (i > self.MinPrice):
tmpVol = self.BidCurve[i][p]
self.CSStart[p].append(i)
while (i > self.MinPrice and self.BidCurve[i][p] == tmpVol):
i -= 1
                # consumer side uses the bid curve (mirrors SetProducerSurplusRange)
                totVol = self.BidCurve[i][p] - tmpVol
self.CSEnd[p].append(i-1)
self.CSLength[p].append(self.CSEnd[p][-1] - self.CSStart[p][-1])
self.CSMarginal[p].append(self.CSLength[p][-1]*totVol)
self.CS[p].append(self.CSLength[p][-1]*totVol)
for j in range(0, len(self.CSMarginal[p])-1):
self.CS[p][-1] += self.CSMarginal[p][j]
print("Made CS Range")
def FindMCP(self,inBidOfferCurve):
MCP = np.zeros(self.Hours)
for i in range(0,self.Hours):
MCP[i] = self.find_zero(inBidOfferCurve[:,i])#np.searchsorted(self.BidOfferCurve[:,i], 0, side="left")
return MCP
def CalculateTotalSurplus(self):
#for each period
for p in range(0,self.Hours):
p=p
print("Calculated Surplus")
def CalculateConsumerSurplus(self):
print("Calculated Consumer Surplus")
def MakeBidOfferCurve(self):
for i in self.BidCollection.BidOfferList:
if(i.Volume < 0):
ix = int(i.Price) - self.MinPrice
self.BidStack[ix,i.HourID] += i.Volume
else:
ix = int(i.Price) - self.MinPrice
self.OfferStack[ix,i.HourID] += i.Volume
it = np.nditer(self.OfferStack, flags=['multi_index'])
for i in it:
if(it.multi_index[0]>0):
self.OfferCurve[it.multi_index] += self.OfferCurve[it.multi_index[0]-1,it.multi_index[1]]+self.OfferStack[it.multi_index[0]-1,it.multi_index[1]]
for i in range(self.MaxPrice - self.MinPrice-1,-1,-1):
for j in range(0,self.Hours):
self.BidCurve[i,j] += self.BidCurve[i+1,j]-self.BidStack[i,j]
print("Made BidOffer Curve")
def find_zero(self, array):
value = 0
idx = np.searchsorted(array, value, side="left")
if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):
return idx-1
else:
return idx
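    # Worked example of find_zero (illustrative comment only): for the
    # monotonically increasing net curve array([-30., -10., 5., 40.]) and
    # value 0, np.searchsorted returns idx = 2; since |0 - (-10)| = 10 is not
    # smaller than |0 - 5| = 5, the method returns 2, i.e. the price step
    # whose net volume is closest to zero.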
class BidCollection:
def __init__(self):
self.BidOfferList = []
self.BlockOffers = []
print("Initialised Bid Collection")
def LoadFromText(self,FolderPath):
print("loading From Bids from Text in " + FolderPath)
aOfferPrice = np.atleast_2d(np.loadtxt(FolderPath + "/OfferPrice.txt", np.float64,delimiter='\t',skiprows=1,usecols=range(1,25)))
aOfferVolume = np.atleast_2d(np.loadtxt(FolderPath + "/OfferVolume.txt", np.float64,delimiter='\t',skiprows=1,usecols=range(1,25)))
aBlockOffers = np.atleast_2d(np.loadtxt(FolderPath + "/BlockOffers.txt", np.float64, delimiter='\t', skiprows=1, usecols=range(1, 26)))
aBidPrice = np.atleast_2d(np.loadtxt(FolderPath + "/BidPrice.txt", np.float64,delimiter='\t',skiprows=1,usecols=range(1,25)))
aBidVolume = np.atleast_2d(np.loadtxt(FolderPath + "/BidVolume.txt", np.float64,delimiter='\t',skiprows=1,usecols=range(1,25)))
it = np.nditer([aBidPrice,aBidVolume], flags=['multi_index'])
for x, y in it:
tmpBid = HourlyBid(it.multi_index[0],it.multi_index[1],x,-y)
self.BidOfferList.append(tmpBid)
it = np.nditer([aOfferPrice,aOfferVolume], flags=['multi_index'])
for x, y in it:
tmpBid = HourlyBid(it.multi_index[0],it.multi_index[1],x,y)
self.BidOfferList.append(tmpBid)
for row in aBlockOffers:
tmpBlk = BlockOffer(0,row[0],row[1:])
if(tmpBlk.TotalVolume > 0):
self.BlockOffers.append(tmpBlk)
self.BlockOfferVolumes = np.zeros([len(self.BlockOffers),24])
for i in range(0,self.BlockOfferVolumes.shape[0]):
for j in range(0, self.BlockOfferVolumes.shape[1]):
self.BlockOfferVolumes[i,j] = self.BlockOffers[i].Volume[j]
print("Loaded From Text")
class BlockOffer:
def __init__(self, inOwnerID, inPrice, inVolume):
self.OwnerID = inOwnerID
self.Price = inPrice
self.Volume = inVolume
self.TotalVolume = np.sum(inVolume)
class HourlyBid:
def __init__(self, inOwnerID, inHour, inPrice, inVolume):
self.OwnerID = inOwnerID
self.HourID = inHour
self.Price = inPrice
self.Volume = inVolume
print("Initialised Hourly Bid")
| gpl-3.0 |
waynenilsen/statsmodels | statsmodels/tools/testing.py | 23 | 1443 | """assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def is_pandas_min_version(min_version):
'''check whether pandas is at least min_version
'''
from pandas.version import short_version as pversion
return StrictVersion(strip_rc(pversion)) >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
if not is_pandas_min_version('0.14.1'):
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
else:
if isinstance(desired, pandas.Index):
pdt.assert_index_equal(actual, desired)
elif isinstance(desired, pandas.Series):
pdt.assert_series_equal(actual, desired, **kwds)
elif isinstance(desired, pandas.DataFrame):
pdt.assert_frame_equal(actual, desired, **kwds)
else:
            npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
| bsd-3-clause |
dgary50/eovsa | feed_rot_simulation.py | 1 | 6683 | import numpy as np
import matplotlib.pylab as plt
from pcapture2 import bl_list
bl2ord = bl_list()
def rot_sim(indict):
''' Simulates effect of non-ideal feed behavior on input data for a single baseline.
If 'unrot' is true, it takes the data as output data and applies the inverse
of the corresponding Mueller matrix to it.
Input is a dictionary (indict) with the following keys (and their defaults if
omitted)
'data' : 4 x ntimes array of data, corresponding to a set of XX, XY, YX, YY,
in that order, for each time. No default (error return if omitted)
'chi1' : Parallactic (or rotation) angle of first antenna (default 0) (scalar or ntimes array)
'chi2' : Parallactic (or rotation) angle of second antenna (default 0) (scalar or ntimes array)
'a1' : Relative amplitude of Y wrt X for first antenna (default 1)
'a2' : Relative amplitude of Y wrt X for second antenna (default 1)
'd1' : Relative cross-talk between X and Y for first antenna (default 0)
'd2' : Relative cross-talk between X and Y for second antenna (default 0)
'unrot': Whether to rotate or unrotate the input data (default False)
'verbose': Print some diagnostic messages
'doplot': Create some plots of the before and after amplitudes and phases
Result is a plot of input and output. Returns the rotated or unrotated data in
the same form as the input data.
'''
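    # Illustrative example of the expected input (comment sketch; the numbers
    # are hypothetical): a constant unit-amplitude XX/YY signal fed through
    # two feeds rotating together over 100 time steps,
    #   chi = np.linspace(0, np.pi, 100)
    #   indict = {'data': np.array([np.ones(100), np.zeros(100),
    #                               np.zeros(100), np.ones(100)]),
    #             'chi1': chi, 'chi2': chi, 'a1': 1.0, 'a2': 1.0,
    #             'd1': 0.05, 'd2': 0.05, 'doplot': True}
    #   out = rot_sim(indict)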
    # Input is a dictionary containing the needed keys. Any missing
# keys are filled in with defaults:
data = indict.get('data') # No default
if data is None:
print 'Must supply "data" key in input dictionary'
return
chi1 = indict.get('chi1',0)
chi2 = indict.get('chi2',0)
a1 = indict.get('a1',1)
a2 = indict.get('a2',1)
d1 = indict.get('d1',0)
d2 = indict.get('d2',0)
titl = indict.get('title','')
verbose = indict.get('verbose',False)
unrot = indict.get('unrot',False)
doplot = indict.get('doplot',False)
titl = titl+' a1:'+str(a1)+' a2:'+str(a2)+' d1:'+str(d1)+' d2:'+str(d2)
if unrot:
titl += ' Unrot'
# Do some sanity checks. Any or all inputs can have only 1 time (assumed constant at other times)
# but any that do have times must have the same number of times
try:
dn, dnt = data.shape
if dn != 4:
print 'Number of data elements for each time must be 4 [XX, XY, YX, YY].'
return
except:
if len(data) == 4:
dnt = 1
else:
print 'Number of data elements for each time must be 4 [XX, XY, YX, YY].'
return
try:
c1nt, = chi1.shape
except:
c1nt = 1
try:
c2nt, = chi2.shape
except:
c2nt = 1
if dnt > 1 or c1nt > 1 or c2nt > 1:
# Multiple times are requested, so ensure input is compatible
nt = np.max(np.array([dnt,c1nt,c2nt]))
if dnt == 1:
# Expand data to 4 x nt times
data = np.rollaxis(np.tile(data,(nt,1)),1)
if c1nt == 1:
# Expand chi1 to nt times
chi1 = chi1*np.ones(nt)
if c2nt == 1:
# Expand chi2 to nt times
chi2 = chi2*np.ones(nt)
# Now see if they all have the same length
if nt != len(data[0]) or nt != len(chi1) or nt != len(chi2):
print 'Number of times in data, chi1 and chi2 are not compatible.'
return
else:
nt = 1
if verbose:
print 'ntimes=',nt
print 'shapes of data, chi1, chi2:',data.shape,chi1.shape,chi2.shape
# At this point, we should have uniformity of times
# Rotation matrix for first antenna
R1 = np.array([[np.cos(chi1), np.sin(chi1)],[-np.sin(chi1), np.cos(chi1)]])
if verbose:
print 'Rotation matrix R1 shape:',R1.shape,'for first time is'
print R1[:,:,0]
# Amplitude matrix for first antenna
A = np.array([[1,d1],[-d1,a1]])
# Resultant Jones matrix for first antenna
JA = []
for i in range(nt):
JA.append(np.dot(A, R1[:,:,i]))
if verbose:
print 'First element of Jones matrix A:\n',JA[0]
# Rotation matrix for second antenna
R2 = np.array([[np.cos(chi2), np.sin(chi2)],[-np.sin(chi2), np.cos(chi2)]])
if verbose:
print 'Rotation matrix R2 shape:',R2.shape,'for first time is'
print R2[:,:,0]
# Amplitude matrix for second antenna
B = np.array([[1,d2],[-d2,a2]])
# Resultant Jones matrix for second antenna
JB = []
for i in range(nt):
JB.append(np.dot(B, R2[:,:,i]))
if verbose:
print 'First element of Jones matrix B:\n',JB[0]
# Resultant Mueller matrix
M = []
for i in range(nt):
M.append(np.kron(JA[i],np.conj(JB[i])))
if verbose and i == 0:
print 'Mueller matrix at first time is:\n',M[0]
if unrot:
M[i] = np.linalg.inv(M[i])
# Apply matrix to data
out = np.zeros_like(data)
if nt == 1:
out = np.dot(M[0],data)
else:
for i in range(nt):
out[:,i] = np.dot(M[i],data[:,i])
# Now plot a comparison of the input data to output data:
if doplot:
f, ax = plt.subplots(4,2)
f.set_size_inches(5, 6.5)
f.suptitle(titl+' Amp')
pol = ['XX','XY','YX','YY']
dirxn = [' (IN)',' (OUT)']
for i in range(4):
ax[i,0].plot(abs(data[i]),'.')
ax[i,0].set_ylabel('Rel. Amp')
ax[i,1].plot(abs(out[i]),'.')
for j in range(2):
if i == 3:
ax[i,j].set_xlabel('Time index')
ax[i,j].set_ylim(-0.05,2)
ax[i,j].grid()
ax[i,j].text(0.05,0.8,pol[i]+dirxn[j],transform=ax[i,j].transAxes)
f, ax = plt.subplots(4,2)
f.set_size_inches(5, 6.5)
f.suptitle(titl+' Phase')
pol = ['XX','XY','YX','YY']
dirxn = [' (IN)',' (OUT)']
for i in range(4):
ax[i,0].plot(np.angle(data[i]),'.')
ax[i,0].set_ylabel('Phase')
ax[i,1].plot(np.angle(out[i]),'.')
for j in range(2):
if i == 3:
ax[i,j].set_xlabel('Time index')
ax[i,j].set_ylim(-4,4)
ax[i,j].grid()
ax[i,j].text(0.05,0.8,pol[i]+dirxn[j],transform=ax[i,j].transAxes)
return out | gpl-2.0 |
KaiSmith/Pynstein | genrel/BianchiAnisotropicSE.py | 1 | 3483 | """
Numerically evolves a Bianchi Class I universe with anisotropic stress energy given initial conditions
David Clark, Kai Smith
Case Western Reserve University
2014
"""
from math import *
import numpy as np
import sympy as sp
import scipy.integrate
import matplotlib.pyplot as pplot
from math import pi
a0 = 10.0
b0 = 10.0
c0 = 10.0
a_dot0 = 1.0
b_dot0 = 1.0
c_dot0 = 1.0
A0 = a_dot0/a0
B0 = b_dot0/b0
C0 = c_dot0/c0
omega0 = 1
#Open -1
#Flat 0
#Closed 1
k = 0
t = np.linspace(0, 1, 100)
I0 = A0*B0+B0*C0+A0*C0
H0 = A0+B0+C0
V0 = a0*b0*c0
chi0 = (omega0*I0*H0)/(3*(a_dot0+b_dot0+c_dot0))
#const = 8*pi*G*p0
const = 1
def dydt(y, t):
a, a_dot, b, b_dot, c, c_dot = y
"""
a_dot_dot = (a/2.0)*(chi0*(a0/a - b0/b - c0/c)*(V0/(a*b*c) + k) - (a_dot*b_dot)/(a*b) - (a_dot*c_dot)/(a*c) + (b_dot*c_dot)/(b*c))
b_dot_dot = (b/2.0)*(chi0*(-a0/a + b0/b - c0/c)*(V0/(a*b*c) + k) - (a_dot*b_dot)/(a*b) + (a_dot*c_dot)/(a*c) - (b_dot*c_dot)/(b*c))
c_dot_dot = (c/2.0)*(chi0*(-a0/a - b0/b + c0/c)*(V0/(a*b*c) + k) + (a_dot*b_dot)/(a*b) - (a_dot*c_dot)/(a*c) - (b_dot*c_dot)/(b*c))
"""
a_dot_dot = (a/2.0)*(-const*(V0/(a*b*c))*(-a0/a + b0/b + c0/c) - k*(-1/a**2 + 1/b**2 + 1/c**2) - (a_dot*b_dot)/(a*b) - (a_dot*c_dot)/(a*c) + (b_dot*c_dot)/(b*c))
b_dot_dot = (b/2.0)*(-const*(V0/(a*b*c))*(a0/a - b0/b + c0/c) -k*(1/a**2 - 1/b**2 + 1/c**2) - (a_dot*b_dot)/(a*b) + (a_dot*c_dot)/(a*c) - (b_dot*c_dot)/(b*c))
c_dot_dot = (c/2.0)*(-const*(V0/(a*b*c))*(a0/a + b0/b - c0/c) -k*(1/a**2 + 1/b**2 - 1/c**2) + (a_dot*b_dot)/(a*b) - (a_dot*c_dot)/(a*c) - (b_dot*c_dot)/(b*c))
return [a_dot, a_dot_dot, b_dot, b_dot_dot, c_dot, c_dot_dot]
def plot_evolution():
t = np.linspace(0, 1, 100)
y0 = [a0, a_dot0, b0, b_dot0, c0, c_dot0]
y = scipy.integrate.odeint(dydt, y0, t)
a = [value[0] for value in y]
a_dot = [value[1] for value in y]
b = [value[2] for value in y]
b_dot = [value[3] for value in y]
c = [value[4] for value in y]
c_dot = [value[5] for value in y]
stop = len(t) - 1
for values in [a, a_dot, b, b_dot, c, c_dot]:
for i in range(1, len(t)):
if abs(values[i]/values[i-1]) > 1000 and i < stop:
stop = i
break
a, a_dot, b, b_dot, c, c_dot, t = a[:stop], a_dot[:stop], b[:stop], b_dot[:stop], c[:stop], c_dot[:stop], t[:stop]
A = [a_dot[i]/a[i] for i in range(len(t))]
B = [b_dot[i]/b[i] for i in range(len(t))]
C = [c_dot[i]/c[i] for i in range(len(t))]
V = [a[i]*b[i]*c[i] for i in range(len(t))]
"""
pplot.scatter(t, a_dot, c = 'r')
pplot.scatter(t, b_dot, c = 'g')
pplot.scatter(t, c_dot, c = 'b')
pplot.title('First Derivatives')
pplot.show()
"""
pplot.scatter(t, a, c = 'r')
pplot.scatter(t, b, c = 'g')
pplot.scatter(t, c, c = 'b')
pplot.title('Scale Factors')
pplot.show()
pplot.scatter(t, A, c = 'r')
pplot.scatter(t, B, c = 'g')
pplot.scatter(t, C, c = 'b')
pplot.title('Hubble Parameters')
pplot.show()
pplot.scatter(t, V, c = 'r')
pplot.title('Volume')
pplot.show()
def print_long_term_ratios():
t = np.linspace(0, 1000000, 100000)
y0 = [a0, a_dot0, b0, b_dot0, c0, c_dot0]
y = scipy.integrate.odeint(dydt, y0, t)
A = [value[1]/value[0] for value in y]
B = [value[3]/value[2] for value in y]
C = [value[5]/value[4] for value in y]
B_over_C = [A[i]/B[i] for i in range(len(t))]
C_over_A = [C[i]/A[i] for i in range(len(t))]
B_over_A = [B[i]/A[i] for i in range(len(t))]
print('B/C: ' + str(B_over_C[-1]))
print('C/A: ' + str(C_over_A[-1]))
print('B/A: ' + str(B_over_A[-1]))
plot_evolution() | mit |
nomadcube/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
alheinecke/tensorflow-xsmm | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
  score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
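# A cross-check of the derivation in the comments above (illustrative only;
# nothing in this test module calls it): the rank-sum identity gives the
# empirical AUC of a labels/scores pair directly, and evaluating it on the
# output of synthetic_data should land near desired_auc for large num_records.
def _empirical_auc(labels, scores):
  """Empirical AUC via AUC = (R_T - n_T(n_T + 1)/2) / (n_T n_F), where R_T is
  the sum of the ranks of the true-class scores (ties are ignored, which is
  fine for continuous scores)."""
  order = np.argsort(scores)
  ranks = np.empty(len(scores), dtype=np.float64)
  ranks[order] = np.arange(1, len(scores) + 1)
  num_true = int(labels.sum())
  num_false = len(labels) - num_true
  rank_sum_true = ranks[labels].sum()
  return (rank_sum_true - num_true * (num_true + 1) / 2.0) / float(
      num_true * num_false)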
if __name__ == '__main__':
test.main()
| apache-2.0 |
bikashmishra/TwitterAnalysis | Classifier/dotraining.py | 1 | 1091 | import nltk
import utility as util
from TweetClassifier import MultinomialNBTextClassifier
from utility import Timer
import pandas as pd
import numpy as np
from math import ceil
def getClassifier():
# 1. Training set 2 - Sentiment tweets from
# http://thinknook.com/twitter-sentiment-analysis-training-corpus-dataset-2012-09-22/
# Only positive and negative tweets are used
# This set has ~1million tweets. Only 1000 are used initially for POC
df2 = pd.io.parsers.read_csv('../TrainingData/Sentiment_Analysis_Dataset.csv', header=0, delimiter='\t', nrows=100)
sentiment = {0:'negative', 1:'positive'}
tweets = []
for idx, row in df2.iterrows():
tweets.append(((row['SentimentText'],sentiment[row['Sentiment']])))
classifier = MultinomialNBTextClassifier(classes=['negative','positive'])
with Timer() as t:
num_train = int(len(tweets))
num_test = len(tweets)-num_train
classifier.train(tweets[:num_train])
    print 'Elapsed time for Training %s s'%t.secs
return classifier | apache-2.0 |
mrcslws/htmresearch | htmresearch/frameworks/sp_paper/sp_metrics.py | 6 | 32444 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
from nupic.bindings.math import GetNTAReal
import pandas as pd
uintType = "uint32"
def getConnectedSyns(sp):
numInputs = sp.getNumInputs()
numColumns = np.prod(sp.getColumnDimensions())
connectedSyns = np.zeros((numColumns, numInputs), dtype=uintType)
for columnIndex in range(numColumns):
sp.getConnectedSynapses(columnIndex, connectedSyns[columnIndex, :])
connectedSyns = connectedSyns.astype('float32')
return connectedSyns
def getMovingBar(startLocation,
direction,
imageSize=(20, 20),
steps=5,
barHalfLength=3,
orientation='horizontal'):
"""
Generate a list of bars
:param startLocation:
(list) start location of the bar center, e.g. (10, 10)
:param direction:
direction of movement, e.g., (1, 0)
:param imageSize:
(list) number of pixels on horizontal and vertical dimension
:param steps:
(int) number of steps
:param barHalfLength:
(int) length of the bar
:param orientation:
(string) "horizontal" or "vertical"
:return:
"""
startLocation = np.array(startLocation)
direction = np.array(direction)
barMovie = []
for step in range(steps):
barCenter = startLocation + step * direction
barMovie.append(getBar(imageSize,
barCenter,
barHalfLength,
orientation))
return barMovie
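# Illustrative usage of getMovingBar (comment sketch): five 20x20 frames of a
# bar of full length 2*3 + 1 = 7 whose center starts at (5, 5) and shifts one
# pixel along the second axis each step.
#
#   frames = getMovingBar(startLocation=(5, 5), direction=(0, 1),
#                         imageSize=(20, 20), steps=5, barHalfLength=3,
#                         orientation='horizontal')
#   assert len(frames) == 5 and frames[0].shape == (20, 20)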
def getBar(imageSize, barCenter, barHalfLength, orientation='horizontal'):
"""
Generate a single horizontal or vertical bar
:param imageSize
    a list of (numPixelX, numPixelY). The number of pixels on horizontal
and vertical dimension, e.g., (20, 20)
:param barCenter:
(list) center of the bar, e.g. (10, 10)
:param barHalfLength
(int) half length of the bar. Full length is 2*barHalfLength +1
:param orientation:
(string) "horizontal" or "vertical"
:return:
"""
(nX, nY) = imageSize
(xLoc, yLoc) = barCenter
bar = np.zeros((nX, nY), dtype=uintType)
if orientation == 'horizontal':
xmin = max(0, (xLoc - barHalfLength))
xmax = min(nX - 1, (xLoc + barHalfLength + 1))
bar[xmin:xmax, yLoc] = 1
elif orientation == 'vertical':
ymin = max(0, (yLoc - barHalfLength))
ymax = min(nY - 1, (yLoc + barHalfLength + 1))
bar[xLoc, ymin:ymax] = 1
else:
raise RuntimeError("orientation has to be horizontal or vertical")
return bar
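# Worked example of getBar (comment sketch): on a 5x5 grid, a horizontal bar
# of half length 1 centered at (2, 2) sets exactly the three pixels (1, 2),
# (2, 2) and (3, 2); everything else stays 0.
#
#   bar = getBar(imageSize=(5, 5), barCenter=(2, 2), barHalfLength=1)
#   assert bar.sum() == 3 and bar[2, 2] == 1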
def getCross(nX, nY, barHalfLength):
cross = np.zeros((nX, nY), dtype=uintType)
xLoc = np.random.randint(barHalfLength, nX - barHalfLength)
yLoc = np.random.randint(barHalfLength, nY - barHalfLength)
cross[(xLoc - barHalfLength):(xLoc + barHalfLength + 1), yLoc] = 1
cross[xLoc, (yLoc - barHalfLength):(yLoc + barHalfLength + 1)] = 1
return cross
def generateRandomSDR(numSDR, numDims, numActiveInputBits, seed=42):
"""
Generate a set of random SDR's
@param numSDR:
@param nDim:
@param numActiveInputBits:
"""
randomSDRs = np.zeros((numSDR, numDims), dtype=uintType)
indices = np.array(range(numDims))
np.random.seed(seed)
for i in range(numSDR):
randomIndices = np.random.permutation(indices)
activeBits = randomIndices[:numActiveInputBits]
randomSDRs[i, activeBits] = 1
return randomSDRs
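# Illustrative usage of generateRandomSDR (comment sketch): ten 100-bit SDRs
# with 5 active bits each, i.e. 5% sparsity. Every row of the returned
# (10, 100) uint array sums to exactly 5, and the seed makes the set
# reproducible.
#
#   sdrs = generateRandomSDR(numSDR=10, numDims=100, numActiveInputBits=5)
#   assert sdrs.shape == (10, 100) and (sdrs.sum(axis=1) == 5).all()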
def getRandomBar(imageSize, barHalfLength, orientation='horizontal'):
(nX, nY) = imageSize
if orientation == 'horizontal':
xLoc = np.random.randint(barHalfLength, nX - barHalfLength)
yLoc = np.random.randint(0, nY)
bar = getBar(imageSize, (xLoc, yLoc), barHalfLength, orientation)
elif orientation == 'vertical':
xLoc = np.random.randint(0, nX)
yLoc = np.random.randint(barHalfLength, nY - barHalfLength)
bar = getBar(imageSize, (xLoc, yLoc), barHalfLength, orientation)
else:
raise RuntimeError("orientation has to be horizontal or vertical")
# shift bar with random phases
bar = np.roll(bar, np.random.randint(10 * nX), 0)
bar = np.roll(bar, np.random.randint(10 * nY), 1)
return bar
def generateCorrelatedSDRPairs(numInputVectors,
inputSize,
numInputVectorPerSensor,
numActiveInputBits,
corrStrength=0.1,
seed=42):
inputVectors1 = generateRandomSDR(
numInputVectorPerSensor, int(inputSize / 2), numActiveInputBits, seed)
inputVectors2 = generateRandomSDR(
numInputVectorPerSensor, int(inputSize / 2), numActiveInputBits, seed + 1)
# for each input on sensor 1, how many inputs on the 2nd sensor are
# strongly correlated with it?
numCorrPairs = 2
numInputVector1 = numInputVectorPerSensor
numInputVector2 = numInputVectorPerSensor
corrPairs = np.zeros((numInputVector1, numInputVector2))
for i in range(numInputVector1):
idx = np.random.choice(np.arange(numInputVector2),
size=(numCorrPairs,), replace=False)
corrPairs[i, idx] = 1.0 / numCorrPairs
uniformDist = np.ones((numInputVector1, numInputVector2)) / numInputVector2
sampleProb = corrPairs * corrStrength + uniformDist * (1 - corrStrength)
inputVectors = np.zeros((numInputVectors, inputSize))
for i in range(numInputVectors):
vec1 = np.random.randint(numInputVector1)
vec2 = np.random.choice(np.arange(numInputVector2), p=sampleProb[vec1, :])
inputVectors[i][:] = np.concatenate((inputVectors1[vec1],
inputVectors2[vec2]))
return inputVectors, inputVectors1, inputVectors2, corrPairs
def generateDenseVectors(numVectors, inputSize, seed):
np.random.seed(seed)
inputVectors = np.zeros((numVectors, inputSize), dtype=uintType)
for i in range(numVectors):
for j in range(inputSize):
inputVectors[i][j] = random.randrange(2)
return inputVectors
def convertToBinaryImage(image, thresh=75):
binaryImage = np.zeros(image.shape)
binaryImage[image > np.percentile(image, thresh)] = 1
return binaryImage
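# Minimal sketch of the thresholding above (assumes numpy as np): pixels above
# the 75th percentile of the image become 1 and the rest 0, so roughly a
# quarter of the pixels are active regardless of the image's intensity scale.
def _exampleConvertToBinaryImage():
  image = np.arange(100, dtype='float32').reshape(10, 10)
  binary = convertToBinaryImage(image, thresh=75)
  assert binary.shape == (10, 10)
  assert binary.sum() == 25  # values 75..99 exceed the 75th percentile
  return binary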
def getImageData(numInputVectors):
from htmresearch.algorithms.image_sparse_net import ImageSparseNet
DATA_PATH = "../sparse_net/data/IMAGES.mat"
DATA_NAME = "IMAGES"
DEFAULT_SPARSENET_PARAMS = {
"filterDim": 64,
"outputDim": 64,
"batchSize": numInputVectors,
"numLcaIterations": 75,
"learningRate": 2.0,
"decayCycle": 100,
"learningRateDecay": 1.0,
"lcaLearningRate": 0.1,
"thresholdDecay": 0.95,
"minThreshold": 1.0,
"thresholdType": 'soft',
"verbosity": 0, # can be changed to print training loss
"showEvery": 500,
"seed": 42,
}
network = ImageSparseNet(**DEFAULT_SPARSENET_PARAMS)
print "Loading training data..."
images = network.loadMatlabImages(DATA_PATH, DATA_NAME)
nDim1, nDim2, numImages = images.shape
binaryImages = np.zeros(images.shape)
for i in range(numImages):
binaryImages[:, :, i] = convertToBinaryImage(images[:, :, i])
inputVectors = network._getDataBatch(binaryImages)
inputVectors = inputVectors.T
return inputVectors
class SDRDataSet(object):
"""
Generate, store, and manipulate SDR dataset
"""
def __init__(self,
params):
self._params = params
self._inputVectors = []
self._dataType = params['dataType']
self._additionalInfo = {}
self.generateInputVectors(params)
def generateInputVectors(self, params):
if params['dataType'] == 'randomSDR':
self._inputVectors = generateRandomSDR(
params['numInputVectors'],
params['inputSize'],
params['numActiveInputBits'],
params['seed'])
elif params['dataType'] == 'denseVectors':
self._inputVectors = generateDenseVectors(
params['numInputVectors'],
params['inputSize'],
params['seed'])
elif params['dataType'] == 'randomBarPairs':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
bar1 = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], 'horizontal')
bar2 = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], 'vertical')
data = bar1 + bar2
data[data > 0] = 1
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'randomBarSets':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
data = 0
for barI in range(params['numBarsPerInput']):
orientation = np.random.choice(['horizontal', 'vertical'])
bar = getRandomBar((params['nX'], params['nY']),
params['barHalfLength'], orientation)
data += bar
data[data > 0] = 1
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'randomCross':
inputSize = params['nX'] * params['nY']
numInputVectors = params['numInputVectors']
self._inputVectors = np.zeros((numInputVectors, inputSize),
dtype=uintType)
for i in range(numInputVectors):
data = getCross(params['nX'], params['nY'], params['barHalfLength'])
self._inputVectors[i, :] = np.reshape(data, newshape=(1, inputSize))
elif params['dataType'] == 'correlatedSDRPairs':
(inputVectors, inputVectors1, inputVectors2, corrPairs) = \
generateCorrelatedSDRPairs(
params['numInputVectors'],
params['inputSize'],
params['numInputVectorPerSensor'],
params['numActiveInputBits'],
params['corrStrength'],
params['seed'])
self._inputVectors = inputVectors
self._additionalInfo = {"inputVectors1": inputVectors1,
"inputVectors2": inputVectors2,
"corrPairs": corrPairs}
elif params['dataType'] == 'nyc_taxi':
from nupic.encoders.scalar import ScalarEncoder
df = pd.read_csv('./data/nyc_taxi.csv', header=0, skiprows=[1, 2])
inputVectors = np.zeros((5000, params['n']))
for i in range(5000):
inputRecord = {
"passenger_count": float(df["passenger_count"][i]),
"timeofday": float(df["timeofday"][i]),
"dayofweek": float(df["dayofweek"][i]),
}
enc = ScalarEncoder(w=params['w'],
minval=params['minval'],
maxval=params['maxval'],
n=params['n'])
inputSDR = enc.encode(inputRecord["passenger_count"])
inputVectors[i, :] = inputSDR
self._inputVectors = inputVectors
def getInputVectors(self):
return self._inputVectors
def getAdditionalInfo(self):
return self._additionalInfo
from nupic.math.topology import coordinatesFromIndex
realDType = GetNTAReal()
uintType = "uint32"
def percentOverlap(x1, x2):
"""
Computes the percentage of overlap between vectors x1 and x2.
@param x1 (array) binary vector
@param x2 (array) binary vector
@return percentOverlap (float) percentage overlap between x1 and x2
"""
nonZeroX1 = np.count_nonzero(x1)
nonZeroX2 = np.count_nonzero(x2)
percentOverlap = 0
minX1X2 = min(nonZeroX1, nonZeroX2)
if minX1X2 > 0:
overlap = float(np.dot(x1.T, x2))
percentOverlap = overlap / minX1X2
return percentOverlap
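# Worked example (sketch, assumes numpy as np): x1 and x2 share two active
# bits and the smaller vector has three active bits, so the score is 2/3.
# percentOverlap is symmetric in its arguments.
def _examplePercentOverlap():
  x1 = np.array([1, 1, 0, 1, 0], dtype=uintType)
  x2 = np.array([1, 0, 0, 1, 1], dtype=uintType)
  score = percentOverlap(x1, x2)
  assert abs(score - 2.0 / 3.0) < 1e-9
  return score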
def addNoiseToVector(inputVector, noiseLevel, vectorType):
"""
Add noise to SDRs
@param inputVector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
@param vectorType (string) "sparse" or "dense"
"""
if vectorType == 'sparse':
corruptSparseVector(inputVector, noiseLevel)
elif vectorType == 'dense':
corruptDenseVector(inputVector, noiseLevel)
else:
raise ValueError("vectorType must be 'sparse' or 'dense' ")
def corruptDenseVector(vector, noiseLevel):
"""
Corrupts a binary vector by inverting noiseLevel percent of its bits.
@param vector (array) binary vector to be corrupted
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
size = len(vector)
for i in range(size):
rnd = random.random()
if rnd < noiseLevel:
if vector[i] == 1:
vector[i] = 0
else:
vector[i] = 1
def corruptSparseVector(sdr, noiseLevel):
"""
Add noise to an SDR by turning off numNoiseBits active bits and turning on
numNoiseBits inactive bits
@param sdr (array) Numpy array of the SDR
@param noiseLevel (float) amount of noise to be applied on the vector.
"""
numNoiseBits = int(noiseLevel * np.sum(sdr))
if numNoiseBits <= 0:
return sdr
activeBits = np.where(sdr > 0)[0]
inActiveBits = np.where(sdr == 0)[0]
turnOffBits = np.random.permutation(activeBits)
turnOnBits = np.random.permutation(inActiveBits)
turnOffBits = turnOffBits[:numNoiseBits]
turnOnBits = turnOnBits[:numNoiseBits]
sdr[turnOffBits] = 0
sdr[turnOnBits] = 1
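# Minimal sketch (assumes numpy as np): corrupting an SDR with noiseLevel=0.5
# turns half of its active bits off and the same number of previously inactive
# bits on, so the total number of active bits is preserved.
def _exampleCorruptSparseVector():
  sdr = np.zeros(50, dtype=uintType)
  sdr[:10] = 1
  original = sdr.copy()
  corruptSparseVector(sdr, 0.5)
  assert sdr.sum() == original.sum()  # sparsity preserved
  assert np.dot(sdr, original) == 5   # 5 of the 10 original bits survive
  return sdr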
def calculateOverlapCurve(sp, inputVectors):
"""
Evaluate the noise robustness of an SP for a given set of SDRs
@param sp a spatial pooler instance
@param inputVectors (array) 2D numpy array of input SDRs
:return: (noiseLevelList, inputOverlapScore, outputOverlapScore)
"""
columnNumber = np.prod(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
outputColumns = np.zeros((numInputVector, columnNumber), dtype=uintType)
outputColumnsCorrupted = np.zeros((numInputVector, columnNumber),
dtype=uintType)
noiseLevelList = np.linspace(0, 1.0, 21)
inputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
outputOverlapScore = np.zeros((numInputVector, len(noiseLevelList)))
for i in range(numInputVector):
for j in range(len(noiseLevelList)):
inputVectorCorrupted = copy.deepcopy(inputVectors[i][:])
corruptSparseVector(inputVectorCorrupted, noiseLevelList[j])
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
sp.compute(inputVectorCorrupted, False,
outputColumnsCorrupted[i][:])
inputOverlapScore[i][j] = percentOverlap(inputVectors[i][:],
inputVectorCorrupted)
outputOverlapScore[i][j] = percentOverlap(outputColumns[i][:],
outputColumnsCorrupted[i][:])
return noiseLevelList, inputOverlapScore, outputOverlapScore
def classifySPoutput(targetOutputColumns, outputColumns):
"""
Classify the SP output
@param targetOutputColumns (list) The target outputs, corresponding to
different classes
@param outputColumns (array) The current output
@return classLabel (int) classification outcome
"""
numTargets, numDims = targetOutputColumns.shape
overlap = np.zeros((numTargets,))
for i in range(numTargets):
overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :])
classLabel = np.argmax(overlap)
return classLabel
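# Illustrative sketch (assumes numpy as np): with two target SDRs, an output
# that overlaps the second target more strongly is assigned class label 1.
def _exampleClassifySPoutput():
  targets = np.zeros((2, 8), dtype=uintType)
  targets[0, :4] = 1  # class 0 prototype
  targets[1, 4:] = 1  # class 1 prototype
  output = np.array([0, 0, 0, 1, 1, 1, 1, 0], dtype=uintType)
  assert classifySPoutput(targets, output) == 1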
def classificationAccuracyVsNoise(sp, inputVectors, noiseLevelList):
"""
Evaluate whether the SP output is classifiable, with varying amount of noise
@param sp a spatial pooler instance
@param inputVectors (list) list of input SDRs
@param noiseLevelList (list) list of noise levels
:return:
"""
numInputVector, inputSize = inputVectors.shape
if sp is None:
targetOutputColumns = copy.deepcopy(inputVectors)
else:
columnNumber = np.prod(sp.getColumnDimensions())
# calculate target output given the uncorrupted input vectors
targetOutputColumns = np.zeros((numInputVector, columnNumber),
dtype=uintType)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, targetOutputColumns[i][:])
outcomes = np.zeros((len(noiseLevelList), numInputVector))
for i in range(len(noiseLevelList)):
for j in range(numInputVector):
corruptedInputVector = copy.deepcopy(inputVectors[j][:])
corruptSparseVector(corruptedInputVector, noiseLevelList[i])
if sp is None:
outputColumns = copy.deepcopy(corruptedInputVector)
else:
outputColumns = np.zeros((columnNumber, ), dtype=uintType)
sp.compute(corruptedInputVector, False, outputColumns)
predictedClassLabel = classifySPoutput(targetOutputColumns, outputColumns)
outcomes[i][j] = predictedClassLabel == j
predictionAccuracy = np.mean(outcomes, 1)
return predictionAccuracy
def plotExampleInputOutput(sp, inputVectors, saveFigPrefix=None):
"""
Plot example input & output
@param sp: a spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns,), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
fig, axs = plt.subplots(2, 1)
axs[0].imshow(inputVectors[:, :200], cmap='gray', interpolation="nearest")
axs[0].set_ylabel('input #')
axs[0].set_title('input vectors')
axs[1].imshow(outputColumns[:, :200], cmap='gray', interpolation="nearest")
axs[1].set_ylabel('input #')
axs[1].set_title('output vectors')
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output.pdf'.format(saveFigPrefix))
inputDensity = np.sum(inputVectors, 1) / float(inputSize)
outputDensity = np.sum(outputColumns, 1) / float(numColumns)
fig, axs = plt.subplots(2, 1)
axs[0].plot(inputDensity)
axs[0].set_xlabel('input #')
axs[0].set_ylim([0, 0.2])
axs[1].plot(outputDensity)
axs[1].set_xlabel('input #')
axs[1].set_ylim([0, 0.05])
if saveFigPrefix is not None:
plt.savefig('figures/{}_example_input_output_density.pdf'.format(saveFigPrefix))
def inspectSpatialPoolerStats(sp, inputVectors, saveFigPrefix=None):
"""
Inspect the statistics of a spatial pooler given a set of input vectors
@param sp: a spatial pooler instance
@param inputVectors: a set of input vectors
"""
numInputVector, inputSize = inputVectors.shape
numColumns = np.prod(sp.getColumnDimensions())
outputColumns = np.zeros((numInputVector, numColumns), dtype=uintType)
inputOverlap = np.zeros((numInputVector, numColumns), dtype=uintType)
connectedCounts = np.zeros((numColumns, ), dtype=uintType)
sp.getConnectedCounts(connectedCounts)
winnerInputOverlap = np.zeros(numInputVector)
for i in range(numInputVector):
sp.compute(inputVectors[i][:], False, outputColumns[i][:])
inputOverlap[i][:] = sp.getOverlaps()
activeColumns = np.where(outputColumns[i][:] > 0)[0]
if len(activeColumns) > 0:
winnerInputOverlap[i] = np.mean(
inputOverlap[i][np.where(outputColumns[i][:] > 0)[0]])
avgInputOverlap = np.mean(inputOverlap, 0)
entropy = calculateEntropy(outputColumns)
activationProb = np.mean(outputColumns.astype(realDType), 0)
dutyCycleDist, binEdge = np.histogram(activationProb,
bins=10, range=[-0.005, 0.095])
dutyCycleDist = dutyCycleDist.astype('float32') / np.sum(dutyCycleDist)
binCenter = (binEdge[1:] + binEdge[:-1])/2
fig, axs = plt.subplots(2, 2)
axs[0, 0].hist(connectedCounts)
axs[0, 0].set_xlabel('# Connected Synapse')
axs[0, 1].hist(winnerInputOverlap)
axs[0, 1].set_xlabel('# winner input overlap')
axs[1, 0].bar(binEdge[:-1]+0.001, dutyCycleDist, width=.008)
axs[1, 0].set_xlim([-0.005, .1])
axs[1, 0].set_xlabel('Activation Frequency')
axs[1, 0].set_title('Entropy: {}'.format(entropy))
axs[1, 1].plot(connectedCounts, activationProb, '.')
axs[1, 1].set_xlabel('connection #')
axs[1, 1].set_ylabel('activation freq')
plt.tight_layout()
if saveFigPrefix is not None:
plt.savefig('figures/{}_network_stats.pdf'.format(saveFigPrefix))
return fig
def getRFCenters(sp, params, type='connected'):
numColumns = np.product(sp.getColumnDimensions())
dimensions = (params['nX'], params['nY'])
meanCoordinates = np.zeros((numColumns, 2))
avgDistToCenter = np.zeros((numColumns, 2))
for columnIndex in range(numColumns):
receptiveField = np.zeros((sp.getNumInputs(), ))
if type == 'connected':
sp.getConnectedSynapses(columnIndex, receptiveField)
elif type == 'potential':
sp.getPotential(columnIndex, receptiveField)
else:
raise RuntimeError('unknown RF type')
connectedSynapseIndex = np.where(receptiveField)[0]
if len(connectedSynapseIndex) == 0:
continue
coordinates = []
for synapseIndex in connectedSynapseIndex:
coordinate = coordinatesFromIndex(synapseIndex, dimensions)
coordinates.append(coordinate)
coordinates = np.array(coordinates)
coordinates = coordinates.astype('float32')
angularCoordinates = np.array(coordinates)
angularCoordinates[:, 0] = coordinates[:, 0] / params['nX'] * 2 * np.pi
angularCoordinates[:, 1] = coordinates[:, 1] / params['nY'] * 2 * np.pi
for i in range(2):
meanCoordinate = np.arctan2(
np.sum(np.sin(angularCoordinates[:, i])),
np.sum(np.cos(angularCoordinates[:, i])))
if meanCoordinate < 0:
meanCoordinate += 2 * np.pi
dist2Mean = angularCoordinates[:, i] - meanCoordinate
dist2Mean = np.arctan2(np.sin(dist2Mean), np.cos(dist2Mean))
dist2Mean = np.max(np.abs(dist2Mean))
meanCoordinate *= dimensions[i] / (2 * np.pi)
dist2Mean *= dimensions[i] / (2 * np.pi)
avgDistToCenter[columnIndex, i] = dist2Mean
meanCoordinates[columnIndex, i] = meanCoordinate
return meanCoordinates, avgDistToCenter
def binaryEntropyVectorized(x):
"""
Calculate the entropy for a list of binary random variables
:param x: (numpy array) probability of each variable being 1
:return: entropy: (numpy array) entropy of each variable
"""
entropy = - x*np.log2(x) - (1-x)*np.log2(1-x)
entropy[x*(1 - x) == 0] = 0
return entropy
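# Worked example (sketch, assumes numpy as np): a fair binary variable
# (p = 0.5) carries 1 bit of entropy, while p = 0.25 gives roughly 0.811 bits;
# the masking line above handles the degenerate p = 0 and p = 1 cases.
def _exampleBinaryEntropy():
  p = np.array([0.25, 0.5])
  h = binaryEntropyVectorized(p)
  assert abs(h[1] - 1.0) < 1e-9
  assert abs(h[0] - 0.8112781244591329) < 1e-9
  return h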
def renyiEntropyVectorized(x):
entropy = -np.log2(np.square(x) + np.square(1-x))
return entropy
def calculateEntropy(activeColumns, type='binary'):
"""
Calculate the mean entropy given the activation history
@param activeColumns (array) 2D numpy array of activation history
@param type (string) entropy type, 'binary' or 'renyi'
@return entropy (float) mean entropy per column
"""
activationProb = np.mean(activeColumns, 0)
if type == 'binary':
totalEntropy = np.sum(binaryEntropyVectorized(activationProb))
elif type == 'renyi':
totalEntropy = np.sum(renyiEntropyVectorized(activationProb))
else:
raise ValueError('unknown entropy type')
numberOfColumns = activeColumns.shape[1]
# return mean entropy
return totalEntropy/numberOfColumns
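# Worked example (sketch, assumes numpy as np): columns that fire on half of
# the inputs contribute 1 bit of entropy each, and a column active on a
# quarter of the inputs contributes about 0.811 bits; the result is the mean.
def _exampleCalculateEntropy():
  activeColumns = np.array([[1, 0, 1],
                            [0, 1, 0],
                            [1, 0, 0],
                            [0, 1, 0]])
  # activation probabilities per column: 0.5, 0.5, 0.25
  meanEntropy = calculateEntropy(activeColumns)
  assert abs(meanEntropy - 2.8112781244591329 / 3) < 1e-9
  return meanEntropy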
def calculateInputOverlapMat(inputVectors, sp):
numColumns = np.product(sp.getColumnDimensions())
numInputVector, inputSize = inputVectors.shape
overlapMat = np.zeros((numColumns, numInputVector))
for c in range(numColumns):
connectedSynapses = np.zeros((inputSize, ), dtype=uintType)
sp.getConnectedSynapses(c, connectedSynapses)
for i in range(numInputVector):
overlapMat[c, i] = percentOverlap(connectedSynapses, inputVectors[i, :])
return overlapMat
def calculateStability(activeColumnsCurrentEpoch, activeColumnsPreviousEpoch):
activeColumnsStable = np.logical_and(activeColumnsCurrentEpoch,
activeColumnsPreviousEpoch)
stability = np.mean(np.sum(activeColumnsStable, 1))/\
np.mean(np.sum(activeColumnsCurrentEpoch, 1))
return stability
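# Minimal sketch (assumes numpy as np): stability is the average number of
# columns that stay active from the previous epoch to the current one,
# normalised by the average number of active columns in the current epoch;
# identical epochs give 1.0.
def _exampleCalculateStability():
  previous = np.array([[1, 1, 0, 0],
                       [0, 0, 1, 1]])
  current = np.array([[1, 0, 1, 0],
                      [0, 0, 1, 1]])
  # row 1: 1 of 2 active columns shared; row 2: 2 of 2 shared -> 3/4 overall
  assert abs(calculateStability(current, previous) - 0.75) < 1e-9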
def calculateInputSpaceCoverage(sp):
numInputs = np.prod(sp.getInputDimensions())
numColumns = np.prod(sp.getColumnDimensions())
inputSpaceCoverage = np.zeros(numInputs)
connectedSynapses = np.zeros((numInputs), dtype=uintType)
for columnIndex in range(numColumns):
sp.getConnectedSynapses(columnIndex, connectedSynapses)
inputSpaceCoverage += connectedSynapses
inputSpaceCoverage = np.reshape(inputSpaceCoverage, sp.getInputDimensions())
return inputSpaceCoverage
def reconstructionError(sp, inputVectors, activeColumnVectors, threshold=0.):
"""
Computes a reconstruction error. The reconstruction $r(x)$ of an input vector $x$
is given by the sum of the active columns' connected synapse vectors of
the SDR representation $sdr(x)$ of $x$, normalized by $1/numActiveColumns$.
The error is the normalized sum over the "Hamming distance" (i.e. the distance
induced by the L1 norm) of $x$ and its reconstruction $r(x)$, i.e. (in LaTeX)
\[
Reconstruction Error = (1/batchSize) * \sum_{x \in InputBatch} \| x - r(x) \|_1 .
\]
Note that $r(x)$ can be expressed as
\[
r(x) = (1/numActiveColumns) * C * sdr(x) ,
\]
where we view $sdr(x)$ as a binary column vector and $C$ is the
binary matrix whose jth column encodes the synaptic connectivity of
the pooler's columns and the input bits, i.e.
\[
c_{i,j} = 1 :<=> column j has a stable synaptic
connection to input bit i.
\]
Note: Turns out that in our setting (x and syn(i) binary vectors) we have
\[
Reconstruction Error = Witness Error.
\]
It can be shown that the error is optimized by the Hebbian-like update rule
of the spatial pooler.
@param sp (SpatialPooler) the spatial pooler instance
@param inputVectors (array) 2D numpy array of input vectors
@param activeColumnVectors (array) 2D numpy array of activation history
@param threshold (float) if set > 0 it serves as threshold for a step function
applied to the reconstruction vectors (values smaller than
threshold are set to zero, and values bigger to one)
@return error (float) the reconstruction error
"""
batchSize = inputVectors.shape[0]
connectionMatrix = getConnectedSyns(sp)
reconstructionVectors = np.dot(activeColumnVectors, connectionMatrix)
numActiveColumns = np.sum(activeColumnVectors, 1)[0]
reconstructionVectors = reconstructionVectors/numActiveColumns
if threshold > 0.:
reconstructionVectors = np.where(
reconstructionVectors > threshold,
np.ones(reconstructionVectors.shape),
np.zeros(reconstructionVectors.shape))
Err = np.sum(np.absolute(reconstructionVectors - inputVectors))
return Err/batchSize
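# Worked numeric sketch of the formula above (assumes numpy as np; no spatial
# pooler is needed because the reconstruction r(x) is computed directly from a
# hypothetical connection matrix whose rows are the connected-synapse vectors
# of three columns, with two of them active for the input x).
def _exampleReconstruction():
  x = np.array([1, 1, 0, 0])             # input vector
  C = np.array([[1, 1, 0, 0],            # synapses of column 0
                [1, 0, 1, 0],            # synapses of column 1
                [0, 0, 1, 1]])           # synapses of column 2
  sdr = np.array([1, 1, 0])              # columns 0 and 1 active
  r = np.dot(sdr, C) / float(sdr.sum())  # average of the active columns' rows
  # r = [1.0, 0.5, 0.5, 0.0]; L1 distance to x = 0 + 0.5 + 0.5 + 0 = 1.0
  assert abs(np.sum(np.abs(r - x)) - 1.0) < 1e-9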
def witnessError(sp, inputVectors, activeColumnsCurrentEpoch):
"""
Computes a variation of the reconstruction error. It measures the average
Hamming distance between an active column's connected synapse vector and its witnesses.
An input vector is called a witness for a column iff the column is among
the active columns that the spatial pooler computes for that input.
The error is given by
\[
Witness Error = (1/batchSize) * \sum_{x \in InputBatch}
(1/numActiveColumns) * \sum_{i active column of sdr(x)} \| x - syn(i) \|_1.
\]
Note: Turns out that in our setting (x and syn(i) binary vectors) we have
\[
Witness Error = Reconstruction Error.
\]
It can be shown that the error is optimized by the Hebbian-like update rule
of the spatial pooler.
"""
connectionMatrix = getConnectedSyns(sp)
batchSize = inputVectors.shape[0]
# 1st sum... over each input in batch
Err = 0.
for i in range(batchSize):
activeColumns = np.where(activeColumnsCurrentEpoch[i] > 0.)[0]
numActiveColumns = activeColumns.shape[0]
# 2nd sum... over each active column
err = 0.
for j in activeColumns:
# Compute hamming distance and accumulate
err += np.sum(np.absolute(connectionMatrix[j] - inputVectors[i]))
Err += err/numActiveColumns
return Err/batchSize
def mutualInformation(sp, activeColumnsCurrentEpoch, column_1, column_2):
"""
Computes the mutual information of the binary variables that represent
the activation probabilities of two columns. The mutual information I(X,Y)
of two random variables is given by
\[
I (X,Y) = \sum_{x,y} p(x,y) log( p(x,y) / ( p(x) p(y) ) ).
\]
(https://en.wikipedia.org/wiki/Mutual_information)
"""
i, j = column_1, column_2
batchSize = activeColumnsCurrentEpoch.shape[0]
# Activity Counts
ci, cj, cij = 0., 0., dict([((0,0),0.), ((1,0),0.), ((0,1),0.), ((1,1),0.)])
for t in range(batchSize):
ai = activeColumnsCurrentEpoch[t, i]
aj = activeColumnsCurrentEpoch[t, j]
cij[(ai, aj)] += 1.
ci += ai
cj += aj
# Mutual information calculation
Iij = 0
for a,b in [(0,0), (1,0), (0,1), (1,1)]:
# Compute probabilities
pij = cij[(a,b)]/batchSize
pi = ci/batchSize if a == 1 else 1. - ci/batchSize
pj = cj/batchSize if b == 1 else 1. - cj/batchSize
# Add current term of mutual information
Iij += pij * np.log2(pij/(pi*pj)) if pij > 0 else 0
return Iij
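# Worked example (sketch, assumes numpy as np): two columns that always fire
# together over the batch share 1 bit of mutual information, while a column
# paired with one that fires independently at chance level shares none. The
# sp argument is not used inside mutualInformation, so None can be passed.
def _exampleMutualInformation():
  history = np.array([[1, 1, 1],
                      [0, 0, 1],
                      [1, 1, 0],
                      [0, 0, 0]])
  assert abs(mutualInformation(None, history, 0, 1) - 1.0) < 1e-9  # identical columns
  assert abs(mutualInformation(None, history, 0, 2)) < 1e-9        # independent columns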
def meanMutualInformation(sp, activeColumnsCurrentEpoch, columnsUnderInvestigation = []):
"""
Computes the mean of the mutual information
of pairs taken from a list of columns.
"""
if len(columnsUnderInvestigation) == 0:
columns = range(np.prod(sp.getColumnDimensions()))
else:
columns = columnsUnderInvestigation
numCols = len(columns)
sumMutualInfo = 0
normalizingConst = numCols*(numCols - 1)/2
for i in range(numCols):
for j in range(i+1, numCols):
sumMutualInfo += mutualInformation(sp, activeColumnsCurrentEpoch, columns[i], columns[j])
return sumMutualInfo/normalizingConst
| agpl-3.0 |
robcarver17/pysystemtrade | sysbrokers/IB/client/ib_price_client.py | 1 | 11239 | from dateutil.tz import tz
import datetime
import pandas as pd
from ib_insync import Contract as ibContract
from ib_insync import util
from sysbrokers.IB.client.ib_client import PACING_INTERVAL_SECONDS
from sysbrokers.IB.client.ib_contracts_client import ibContractsClient
from sysbrokers.IB.ib_positions import resolveBS_for_list
from syscore.objects import missing_contract, missing_data
from syscore.dateutils import adjust_timestamp_to_include_notional_close_and_time_offset, strip_timezone_fromdatetime, Frequency, DAILY_PRICE_FREQ
from syslogdiag.logger import logger
from syslogdiag.log_to_screen import logtoscreen
from sysobjects.contracts import futuresContract
from sysexecution.trade_qty import tradeQuantity
class tickerWithBS(object):
def __init__(self, ticker, BorS: str):
self.ticker = ticker
self.BorS = BorS
# we don't include ibClient since we get that through contracts client
class ibPriceClient(ibContractsClient):
def broker_get_historical_futures_data_for_contract(
self,
contract_object_with_ib_broker_config: futuresContract,
bar_freq: Frequency = DAILY_PRICE_FREQ,
allow_expired = False
) -> pd.DataFrame:
"""
Get historical daily data
:param contract_object_with_ib_broker_config: contract where instrument has ib metadata
:param bar_freq: Frequency; bar frequency to request (defaults to daily)
:return: pd.DataFrame of prices, or missing_data if the contract can't be resolved
"""
specific_log = contract_object_with_ib_broker_config.specific_log(self.log)
ibcontract = self.ib_futures_contract(
contract_object_with_ib_broker_config,
allow_expired=allow_expired)
if ibcontract is missing_contract:
specific_log.warn(
"Can't resolve IB contract %s"
% str(contract_object_with_ib_broker_config)
)
return missing_data
price_data = self._get_generic_data_for_contract(
ibcontract, log=specific_log, bar_freq=bar_freq, whatToShow="TRADES")
return price_data
def get_ticker_object(
self, contract_object_with_ib_data: futuresContract,
trade_list_for_multiple_legs: tradeQuantity=None
) -> tickerWithBS:
specific_log = contract_object_with_ib_data.specific_log(self.log)
ibcontract = self.ib_futures_contract(
contract_object_with_ib_data,
trade_list_for_multiple_legs=trade_list_for_multiple_legs,
)
if ibcontract is missing_contract:
specific_log.warn(
"Can't find matching IB contract for %s"
% str(contract_object_with_ib_data)
)
return missing_contract
self.ib.reqMktData(ibcontract, "", False, False)
ticker = self.ib.ticker(ibcontract)
ib_BS_str, ib_qty = resolveBS_for_list(trade_list_for_multiple_legs)
ticker_with_bs = tickerWithBS(ticker, ib_BS_str)
return ticker_with_bs
def cancel_market_data_for_contract_object(
self, contract_object_with_ib_data: futuresContract,
trade_list_for_multiple_legs: tradeQuantity=None
):
specific_log = contract_object_with_ib_data.specific_log(self.log)
ibcontract = self.ib_futures_contract(
contract_object_with_ib_data,
trade_list_for_multiple_legs=trade_list_for_multiple_legs,
)
if ibcontract is missing_contract:
specific_log.warn(
"Can't find matching IB contract for %s"
% str(contract_object_with_ib_data)
)
return missing_contract
self.ib.cancelMktData(ibcontract)
def ib_get_recent_bid_ask_tick_data(
self,
contract_object_with_ib_data: futuresContract,
tick_count=200,
) -> list:
"""
:param contract_object_with_ib_data: futuresContract with IB metadata
:return: list of recent bid/ask ticks, or missing_contract if unresolved
"""
specific_log = self.log.setup(
instrument_code=contract_object_with_ib_data.instrument_code,
contract_date=contract_object_with_ib_data.date_str,
)
if contract_object_with_ib_data.is_spread_contract():
error_msg = "Can't get historical data for combo"
specific_log.critical(error_msg)
raise Exception(error_msg)
ibcontract = self.ib_futures_contract(
contract_object_with_ib_data
)
if ibcontract is missing_contract:
specific_log.warn(
"Can't find matching IB contract for %s"
% str(contract_object_with_ib_data)
)
return missing_contract
recent_ib_time = self.ib.reqCurrentTime() - datetime.timedelta(seconds=60)
tick_data = self.ib.reqHistoricalTicks(
ibcontract, recent_ib_time, "", tick_count, "BID_ASK", useRth=False
)
return tick_data
def _get_generic_data_for_contract(
self,
ibcontract: ibContract,
log:logger=None,
bar_freq: Frequency= DAILY_PRICE_FREQ,
whatToShow:str="TRADES"
) -> pd.DataFrame:
"""
Get historical price data for an IB contract
:param ibcontract: ib_insync Contract
:param bar_freq: Frequency; bar frequency to request (defaults to daily)
:param whatToShow: str; IB data type, e.g. "TRADES"
:return: pd.DataFrame of prices, or missing_data on failure
"""
if log is None:
log = self.log
try:
barSizeSetting, durationStr = _get_barsize_and_duration_from_frequency(
bar_freq)
except Exception as exception:
log.warn(exception)
return missing_data
price_data_raw = self._ib_get_historical_data_of_duration_and_barSize(
ibcontract,
durationStr=durationStr,
barSizeSetting=barSizeSetting,
whatToShow=whatToShow,
log=log,
)
price_data_as_df = self._raw_ib_data_to_df(price_data_raw=price_data_raw, log=log)
return price_data_as_df
def _raw_ib_data_to_df(self, price_data_raw: pd.DataFrame, log:logger) -> pd.DataFrame:
if price_data_raw is None:
log.warn("No price data from IB")
return missing_data
price_data_as_df = price_data_raw[[
"open", "high", "low", "close", "volume"]]
price_data_as_df.columns = ["OPEN", "HIGH", "LOW", "FINAL", "VOLUME"]
date_index = [
self._ib_timestamp_to_datetime(price_row)
for price_row in price_data_raw["date"]
]
price_data_as_df.index = date_index
return price_data_as_df
### TIMEZONE STUFF
def _ib_timestamp_to_datetime(self, timestamp_ib) -> datetime.datetime:
"""
Turns an IB timestamp into a pd.datetime (which plays better with arctic),
converts IB time (UTC?) to local time, and adjusts daily (yyyymmdd) timestamps
to include the notional close
:param timestamp_ib: datetime.datetime
:return: pd.datetime
"""
local_timestamp_ib = self._adjust_ib_time_to_local(timestamp_ib)
timestamp = pd.to_datetime(local_timestamp_ib)
adjusted_ts = adjust_timestamp_to_include_notional_close_and_time_offset(timestamp)
return adjusted_ts
def _adjust_ib_time_to_local(self, timestamp_ib) -> datetime.datetime:
if getattr(timestamp_ib, "tz_localize", None) is None:
# daily, nothing to do
return timestamp_ib
timestamp_ib_with_tz = self._add_tz_to_ib_time(timestamp_ib)
local_timestamp_ib_with_tz = timestamp_ib_with_tz.astimezone(
tz.tzlocal())
local_timestamp_ib = strip_timezone_fromdatetime(local_timestamp_ib_with_tz)
return local_timestamp_ib
def _add_tz_to_ib_time(self, timestamp_ib):
ib_tz = self._get_ib_timezone()
timestamp_ib_with_tz = timestamp_ib.tz_localize(ib_tz)
return timestamp_ib_with_tz
def _get_ib_timezone(self):
# cache
ib_tz = getattr(self, "_ib_time_zone", None)
if ib_tz is None:
ib_time = self.ib.reqCurrentTime()
ib_tz = ib_time.timetz().tzinfo
self._ib_time_zone = ib_tz
return ib_tz
# HISTORICAL DATA
# Works for FX and futures
def _ib_get_historical_data_of_duration_and_barSize(
self,
ibcontract: ibContract,
durationStr: str="1 Y",
barSizeSetting: str="1 day",
whatToShow="TRADES",
log: logger=None,
) -> pd.DataFrame:
"""
Returns historical prices for a contract, up to today
ibcontract is a Contract
:returns: pd.DataFrame of prices with open, high, low, close, volume columns
"""
if log is None:
log = self.log
last_call = self.last_historic_price_calltime
_avoid_pacing_violation(last_call, log=log)
bars = self.ib.reqHistoricalData(
ibcontract,
endDateTime="",
durationStr=durationStr,
barSizeSetting=barSizeSetting,
whatToShow=whatToShow,
useRTH=True,
formatDate=1,
)
df = util.df(bars)
self.last_historic_price_calltime = datetime.datetime.now()
return df
def _get_barsize_and_duration_from_frequency(bar_freq: Frequency) -> (str, str):
barsize_lookup = dict(
[
(Frequency.Day, "1 day"),
(Frequency.Hour, "1 hour"),
(Frequency.Minutes_15, "15 mins"),
(Frequency.Minutes_5, "5 mins"),
(Frequency.Minute, "1 min"),
(Frequency.Seconds_10, "10 secs"),
(Frequency.Second, "1 secs"),
]
)
duration_lookup = dict(
[
(Frequency.Day, "1 Y"),
(Frequency.Hour, "1 M"),
(Frequency.Minutes_15, "1 W"),
(Frequency.Minutes_5, "1 W"),
(Frequency.Minute, "1 D"),
(Frequency.Seconds_10, "14400 S"),
(Frequency.Second, "1800 S"),
]
)
try:
assert bar_freq in barsize_lookup.keys()
assert bar_freq in duration_lookup.keys()
except:
raise Exception(
"Barsize %s not recognised should be one of %s"
% (str(bar_freq), str(barsize_lookup.keys()))
)
ib_barsize = barsize_lookup[bar_freq]
ib_duration = duration_lookup[bar_freq]
return ib_barsize, ib_duration
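# Minimal sketch of the lookup above: daily bars are requested one year at a
# time and hourly bars one month at a time; unrecognised frequencies raise.
def _example_barsize_and_duration():
    assert _get_barsize_and_duration_from_frequency(Frequency.Day) == ("1 day", "1 Y")
    assert _get_barsize_and_duration_from_frequency(Frequency.Hour) == ("1 hour", "1 M")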
def _avoid_pacing_violation(last_call_datetime: datetime.datetime, log: logger=logtoscreen("")):
printed_warning_already = False
while _pause_for_pacing(last_call_datetime):
if not printed_warning_already:
log.msg("Pausing %f seconds to avoid pacing violation" %
(datetime.datetime.now() - last_call_datetime).total_seconds())
printed_warning_already = True
pass
def _pause_for_pacing(last_call_datetime: datetime.datetime):
time_since_last_call = datetime.datetime.now() - last_call_datetime
seconds_since_last_call = time_since_last_call.total_seconds()
should_pause = seconds_since_last_call < PACING_INTERVAL_SECONDS
return should_pause
| gpl-3.0 |
maxmouchet/tb | F4B516/Fall Detection/FallDetection.py | 1 | 5197 | from __future__ import division
import cv2
import sys
import numpy as np
from matplotlib import pyplot as plt
from sklearn.mixture import GaussianMixture
from sklearn.mixture import BayesianGaussianMixture
# Parameters
n_components = 1 # Number of Gaussians to fit
mhi_duration = 0.99 # Duration of the Motion History Images (in seconds)
learning_rate = 0.01
# Video source
path = sys.argv[1]
cap = cv2.VideoCapture(path)
# Background subtractor
fgbg = cv2.BackgroundSubtractorMOG2()
fgbg.setBool('detectShadows', True)
fgbg.setInt('history', 10)
fgbg.setDouble('varThresholdGen', 32)
# Morphologic element to remove background noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
# Video attributes
nb_frame = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))
print("Loaded video " + path)
print(str(width) + "x" + str(height) + "@" + str(fps) + "fps")
print(str(nb_frame) + " frames")
# Store last gaussian means to initialize the EM algorithm at the next iteration
# This ensures the same Gaussians converge to the same persons in the video
last_means = np.concatenate((np.random.rand(n_components,1)*height, np.random.rand(n_components,1)*width), 1)
# MHI initialization
MHI_DURATION = int(mhi_duration*fps)
raw_mhi = []
for i in range(0, n_components):
raw_mhi.append(np.zeros((height,width), dtype=np.float32))
mhi = np.zeros((n_components,nb_frame,height,width), dtype=np.uint8)
a_s = np.zeros((n_components,nb_frame))
b_s = np.zeros((n_components,nb_frame))
ab_s = np.zeros((n_components,nb_frame))
theta_s = np.zeros((n_components,nb_frame))
i = 0
while(1):
ret, frame = cap.read()
print(i)
# Exit the loop if the video is finished
if ret == False:
break
# Extract background and create a feature vector from the positions of the white pixels
fgmask = fgbg.apply(frame,learningRate=learning_rate)
fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
X = np.transpose(np.asarray(np.where(fgmask == 255)))
# Go to the next frame if there is no foreground
if X.size == 0:
continue
mixture = GaussianMixture(n_components=n_components, means_init=last_means)
# mixture = BayesianGaussianMixture(n_components=6, weight_concentration_prior=1, mean_precision_prior=1, covariance_prior=1 * np.eye(2))
mixture.fit(X)
last_means = np.copy(mixture.means_)
# Process each gaussian (person) individually
for k in range(0, n_components):
mean = mixture.means_[k,:]
cov = mixture.covariances_[k,:]
# Center
pt1 = (int(mean[1]), int(mean[0]))
v, w = np.linalg.eigh(cov)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
# Convert to degrees
angle = 180 * angle / np.pi
v = 2. * np.sqrt(2.) * np.sqrt(v)
a_s[k,i]=v[0]
b_s[k,i]=v[1]
ab_s[k,i]=(v[0]/v[1])
theta_s[k,i]=(angle)
# Draw ellipse on video
cv2.ellipse(frame, pt1, (int(v[1]), int(v[0])), -angle, 0, 360, (64, k*255, 64), 3)
# Compute silhouette and update mhi
mask=np.zeros_like(fgmask)
cv2.ellipse(mask, pt1, (int(v[1]), int(v[0])), -angle, 0, 360, (255, 255, 255), -1)
silhouette = 255 - np.bitwise_and(fgmask,mask)
cv2.updateMotionHistory(silhouette, raw_mhi[k], i, MHI_DURATION)
# Scale and store mhi for processing
out = cv2.convertScaleAbs(raw_mhi[k], None, 255.0/MHI_DURATION, (MHI_DURATION - i)*255.0/MHI_DURATION)
mhi[k,i,:,:] = 255 - out
#cv2.imshow(str(k), mhi[k,i,:,:])
cv2.imshow('fgmask', fgmask)
cv2.imshow('frame', frame)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
i += 1
cap.release()
cv2.destroyAllWindows()
# Fall detection w/ thresholding
std_interval = int(0.5*fps)
std_theta = np.zeros((n_components,nb_frame))
std_ratio = np.zeros((n_components,nb_frame))
Cmotion = np.zeros((n_components,nb_frame))
intervalle = 5
seuil1 = 0.2
seuil2 = 15
for k in range(0,n_components):
for i in range(0,int(nb_frame)):
std_theta[k,i] = np.std(theta_s[k,i:i+std_interval])
std_ratio[k,i] = np.std(ab_s[k,i:i+std_interval])  # same sliding window as std_theta
Cmotion[k,i] = np.mean(mhi[k,i,:,:])
for i in range(fps,nb_frame):
Cmotion_avg = np.mean(Cmotion[k,i-intervalle:i])
Cmotion_max=np.amax(Cmotion[k,i-intervalle:i]) - Cmotion_avg
if(Cmotion_max>seuil1):
std_theta_max=np.amax(std_theta[k,i-intervalle:i])
if(std_theta_max>seuil2):
print(str(k) + " is falling on frame " + str(i))
# print(str(Cmotion_max) + ' ' + str(std_theta_max))
for k in range(0,n_components):
plt.figure(k)
plt.subplot(4,1,1)
plt.plot(Cmotion[k,:])
plt.ylabel('Cmotion ' + str(k))
plt.subplot(4,1,2)
plt.plot(std_theta[k,:])
plt.ylabel('std angle')
plt.subplot(4,1,3)
plt.plot(std_ratio[k,:])
plt.ylabel('std ratio')
plt.subplot(4,1,4)
plt.plot(theta_s[k,:])
plt.ylabel('angle')
plt.show()
| mit |
tomlof/scikit-learn | sklearn/tests/test_cross_validation.py | 79 | 47914 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
# estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
test_size = np.ceil(0.33 * len(y))
train_size = len(y) - test_size
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train],
return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(np.unique(y[test],
return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(len(train) + len(test), y.size)
assert_equal(len(train), train_size)
assert_equal(len(test), test_size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with an
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def test_train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def test_train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
    assert_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
cbertinato/pandas | pandas/tests/series/indexing/test_alter_index.py | 1 | 17832 | from datetime import datetime
import numpy as np
from numpy import nan
import pytest
import pandas as pd
from pandas import Categorical, Series, date_range, isna
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('fill', [None, -1])
def test_align(test_data, first_slice, second_slice, join_type, fill):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, fill_value=fill)
join_index = a.index.join(b.index, how=join_type)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('method', ['pad', 'bfill'])
@pytest.mark.parametrize('limit', [None, 1])
def test_align_fill_method(test_data,
first_slice, second_slice,
join_type, method, limit):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, method=method, limit=limit)
join_index = a.index.join(b.index, how=join_type)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
def test_align_nocopy(test_data):
b = test_data.ts[:5].copy()
# do copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(test_data):
a, b = test_data.ts.align(test_data.ts, copy=False)
assert a.index is test_data.ts.index
assert b.index is test_data.ts.index
a, b = test_data.ts.align(test_data.ts, copy=True)
assert a.index is not test_data.ts.index
assert b.index is not test_data.ts.index
def test_align_multiindex():
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(test_data):
identity = test_data.series.reindex(test_data.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(test_data.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(test_data.series.index)
assert identity.index.identical(test_data.series.index)
subIndex = test_data.series.index[10:20]
subSeries = test_data.series.reindex(subIndex)
for idx, val in subSeries.items():
assert val == test_data.series[idx]
subIndex2 = test_data.ts.index[10:20]
subTS = test_data.ts.reindex(subIndex2)
for idx, val in subTS.items():
assert val == test_data.ts[idx]
stuffSeries = test_data.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = test_data.ts.index[::2]
subNonContig = test_data.ts.reindex(nonContigIndex)
for idx, val in subNonContig.items():
assert val == test_data.ts[idx]
# return a copy the same index here
result = test_data.ts.reindex()
assert not (result is test_data.ts)
def test_reindex_nan():
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat():
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(range(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isna()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes():
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(test_data):
# (don't forget to fix this) I think it's fixed
test_data.empty.reindex(test_data.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = test_data.empty.reindex(test_data.ts.index, method='pad')
# pass non-Index
reindexed = test_data.ts.reindex(list(test_data.ts.index))
assert_series_equal(test_data.ts, reindexed)
# bad fill method
ts = test_data.ts[::2]
msg = (r"Invalid fill method\. Expecting pad \(ffill\), backfill"
r" \(bfill\) or nearest\. Got foo")
with pytest.raises(ValueError, match=msg):
ts.reindex(test_data.ts.index, method='foo')
def test_reindex_pad():
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=range(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=range(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest():
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest',
tolerance=[1, 2, 3, 4])
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = Series([0, np.nan, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill():
pass
def test_reindex_int(test_data):
ts = test_data.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(test_data):
# A series other than float, int, string, or object
ts = test_data.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(test_data):
# fail
ts = test_data.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(test_data.ts.index, method='pad')
assert isna(filled_bool[:5]).all()
def test_reindex_categorical():
index = date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_reindex_like(test_data):
other = test_data.ts[::2]
assert_series_equal(test_data.ts.reindex(other.index),
test_data.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value():
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_reindex_datetimeindexes_tz_naive_and_aware():
# GH 8306
idx = date_range('20131101', tz='America/Chicago', periods=7)
newidx = date_range('20131103', periods=10, freq='H')
s = Series(range(7), index=idx)
with pytest.raises(TypeError):
s.reindex(newidx, method='ffill')
def test_reindex_empty_series_tz_dtype():
# GH 20869
result = Series(dtype='datetime64[ns, UTC]').reindex([0, 1])
expected = Series([pd.NaT] * 2, dtype='datetime64[ns, UTC]')
tm.assert_equal(result, expected)
def test_rename():
# GH 17407
s = Series(range(1, 6), index=pd.Index(range(2, 7), name='IntIndex'))
result = s.rename(str)
expected = s.rename(lambda i: str(i))
assert_series_equal(result, expected)
assert result.name == expected.name
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, expected_data, expected_index',
[
# Unique Index
([1, 2], ['one', 'two'], ['two'],
0, [1], ['one']),
([1, 2], ['one', 'two'], ['two'],
'rows', [1], ['one']),
([1, 1, 2], ['one', 'two', 'one'], ['two'],
0, [1, 2], ['one', 'one']),
# GH 5248 Non-Unique Index
([1, 1, 2], ['one', 'two', 'one'], 'two',
0, [1, 2], ['one', 'one']),
([1, 1, 2], ['one', 'two', 'one'], ['one'],
0, [1], ['two']),
([1, 1, 2], ['one', 'two', 'one'], 'one',
0, [1], ['two'])])
def test_drop_unique_and_non_unique_index(data, index, axis, drop_labels,
expected_data, expected_index):
s = Series(data=data, index=index)
result = s.drop(drop_labels, axis=axis)
expected = Series(data=expected_data, index=expected_index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, error_type, error_desc',
[
# single string/tuple-like
(range(3), list('abc'), 'bc',
0, KeyError, 'not found in axis'),
# bad axis
(range(3), list('abc'), ('a',),
0, KeyError, 'not found in axis'),
(range(3), list('abc'), 'one',
'columns', ValueError, 'No axis named columns')])
def test_drop_exception_raised(data, index, drop_labels,
axis, error_type, error_desc):
with pytest.raises(error_type, match=error_desc):
Series(data, index=index).drop(drop_labels, axis=axis)
def test_drop_with_ignore_errors():
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
tm.assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
tm.assert_series_equal(result, expected)
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 3]])
@pytest.mark.parametrize('drop_labels', [[], [1], [3]])
def test_drop_empty_list(index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
series = pd.Series(index=index).drop(drop_labels)
tm.assert_series_equal(series, pd.Series(index=expected_index))
@pytest.mark.parametrize('data, index, drop_labels', [
(None, [1, 2, 3], [1, 4]),
(None, [1, 2, 2], [1, 4]),
([2, 3], [0, 1], [False, True])
])
def test_drop_non_empty_list(data, index, drop_labels):
# GH 21494 and GH 16877
with pytest.raises(KeyError, match='not found in axis'):
pd.Series(data=data, index=index).drop(drop_labels)
| bsd-3-clause |
vhaasteren/piccard | testing/eptaoutput.py | 1 | 14915 | from __future__ import division
import numpy as np
import math
import scipy.linalg as sl, scipy.special as ss
import h5py as h5
import matplotlib.pyplot as plt
import os as os
import glob
import sys
import json
import tempfile
try:
import statsmodels.api as smapi
sm = smapi
except ImportError:
sm = None
"""
Given a collection of samples, return the confidence interval at the requested sigma level
samples: an array of samples
sigmalevel: 1, 2, 3 or 4 (68.3%, 95.4%, 99.7% or 90%). Which sigma limit must be given
weights: optional per-sample weights (e.g. MultiNest posterior weights)
onesided: Give one-sided limits (useful for setting upper or lower limits)
"""
def confinterval(samples, sigmalevel=2, onesided=False, weights=None):
# The probabilities for different sigmas
sigma = [0.68268949, 0.95449974, 0.99730024, 0.90]
bins = 200
xmin = min(samples)
xmax = max(samples)
# If we don't have any weighting (MCMC chain), use the statsmodels package
    if weights is None and sm is not None:
# Create the ecdf function
ecdf = sm.distributions.ECDF(samples)
# Create the binning
x = np.linspace(xmin, xmax, bins)
y = ecdf(x)
else:
# MultiNest chain with weights or no statsmodel.api package
# hist, xedges = np.histogram(samples[:], bins=bins, range=(xmin,xmax), weights=weights, density=True)
hist, xedges = np.histogram(samples[:], bins=bins, range=(xmin,xmax), weights=weights)
        x = np.delete(xedges, -1) + 0.5*(xedges[1] - xedges[0]) # Originally 1.5*, but that turned out to be a plotting bug with matplotlib's 'steps' drawstyle
y = np.cumsum(hist) / np.sum(hist)
# Find the intervals
if(onesided):
bound = 1 - sigma[sigmalevel-1]
else:
bound = 0.5*(1-sigma[sigmalevel-1])
x2min = x[0]
for i in range(len(y)):
if y[i] >= bound:
x2min = x[i]
break
if(onesided):
bound = sigma[sigmalevel-1]
else:
bound = 1 - 0.5 * (1 - sigma[sigmalevel-1])
x2max = x[-1]
for i in reversed(range(len(y))):
if y[i] <= bound:
x2max = x[i]
break
return x2min, x2max
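# Illustrative usage sketch (added for clarity; not part of the original module).
# For a large standard-normal sample the 2-sigma interval should come out near (-2, 2),
# and onesided=True turns the same machinery into a one-sided limit:
# >>> samples = np.random.randn(100000)
# >>> x2min, x2max = confinterval(samples, sigmalevel=2)            # roughly (-2, 2)
# >>> upper = confinterval(samples, sigmalevel=2, onesided=True)[1]  # one-sided upper limit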
"""
Given a 2D matrix of (marginalised) likelihood levels, this function returns
the 1-, 2- and 3-sigma levels. The 2D matrix is usually either a 2D histogram or a
likelihood scan
"""
def getsigmalevels(hist2d):
# We will draw contours with these levels
sigma1 = 0.68268949
level1 = 0
sigma2 = 0.95449974
level2 = 0
sigma3 = 0.99730024
level3 = 0
#
lik = hist2d.reshape(hist2d.size)
sortlik = np.sort(lik)
# Figure out the 1sigma level
dTotal = np.sum(sortlik)
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma1):
nIndex -= 1
dSum += sortlik[nIndex]
level1 = sortlik[nIndex]
# 2 sigma level
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma2):
nIndex -= 1
dSum += sortlik[nIndex]
level2 = sortlik[nIndex]
# 3 sigma level
nIndex = sortlik.size
dSum = 0
while (dSum < dTotal * sigma3):
nIndex -= 1
dSum += sortlik[nIndex]
level3 = sortlik[nIndex]
return level1, level2, level3
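# Illustrative sketch (added for clarity; not part of the original module): the input
# is any 2D array of marginalised posterior weights, e.g. a 2D histogram of samples.
# >>> xs, ys = np.random.randn(2, 50000)
# >>> hist2d, _, _ = np.histogram2d(xs, ys, bins=40)
# >>> level1, level2, level3 = getsigmalevels(hist2d)
# >>> # level1 > level2 > level3; contouring hist2d at these values encloses
# >>> # roughly 68%, 95% and 99.7% of the total weight.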
"""
Obtain the MCMC chain as a numpy array, and a list of parameter indices
@param chainfile: name of the MCMC file
@param parametersfile: name of the file with the parameter labels
@param sampler: what method was used to generate the mcmc chain (auto=autodetect)
other options are: 'emcee', 'MultiNest', 'ptmcmc'
@param nolabels: set to true if ok to print without labels
@param incextra: Whether or not we need to return the stype, pulsar, and
pso ML as well
@return: logposterior (1D), loglikelihood (1D), parameter-chain (2D), parameter-labels(1D)
"""
def ReadMCMCFile(chainfile, parametersfile=None, sampler='auto', nolabels=False,
incextra=False):
    if parametersfile is None:
        parametersfile = chainfile+'.parameters.txt'
mnparametersfile = chainfile+'.mnparameters.txt'
mnparametersfile2 = chainfile+'post_equal_weights.dat.mnparameters.txt'
ptparametersfile = chainfile+'/ptparameters.txt'
psofile = chainfile + '/pso.txt'
if not os.path.exists(psofile):
psofile = None
if sampler.lower() == 'auto':
# Auto-detect the sampler
if os.path.exists(mnparametersfile2):
chainfile = chainfile + 'post_equal_weights.dat'
mnparametersfile = mnparametersfile2
# Determine the type of sampler we've been using through the parameters
# file
if os.path.exists(mnparametersfile):
sampler = 'MultiNest'
parametersfile = mnparametersfile
chainfile = chainfile
figurefileeps = chainfile+'.fig.eps'
figurefilepng = chainfile+'.fig.png'
elif os.path.exists(ptparametersfile):
sampler = 'PTMCMC'
parametersfile = ptparametersfile
if os.path.exists(chainfile+'/chain_1.0.txt'):
figurefileeps = chainfile+'/chain_1.0.fig.eps'
figurefilepng = chainfile+'chain_1.0.fig.png'
chainfile = chainfile+'/chain_1.0.txt'
elif os.path.exists(chainfile+'/chain_1.txt'):
figurefileeps = chainfile+'/chain_1.fig.eps'
figurefilepng = chainfile+'chain_1.fig.png'
chainfile = chainfile+'/chain_1.txt'
else:
                raise IOError("No valid chain found for PTMCMC_Generic")
elif os.path.exists(parametersfile):
sampler = 'emcee'
chainfile = chainfile
figurefileeps = chainfile+'.fig.eps'
figurefilepng = chainfile+'.fig.png'
else:
if not nolabels:
                raise IOError("No valid parameters file found!")
else:
chainfile = chainfile
figurefileeps = chainfile+'.fig.eps'
figurefilepng = chainfile+'.fig.png'
sampler = 'emcee'
elif sampler.lower() == 'multinest':
if os.path.exists(mnparametersfile2):
chainfile = chainfile + 'post_equal_weights.dat'
mnparametersfile = mnparametersfile2
parametersfile = mnparametersfile
figurefileeps = chainfile+'.fig.eps'
figurefilepng = chainfile+'.fig.png'
elif sampler.lower() == 'emcee':
figurefileeps = chainfile+'.fig.eps'
figurefilepng = chainfile+'.fig.png'
elif sampler.lower() == 'ptmcmc':
parametersfile = ptparametersfile
if os.path.exists(chainfile+'/chain_1.0.txt'):
figurefileeps = chainfile+'/chain_1.0.fig.eps'
figurefilepng = chainfile+'chain_1.0.fig.png'
chainfile = chainfile+'/chain_1.0.txt'
elif os.path.exists(chainfile+'/chain_1.txt'):
figurefileeps = chainfile+'/chain_1.fig.eps'
figurefilepng = chainfile+'chain_1.fig.png'
chainfile = chainfile+'/chain_1.txt'
if not nolabels:
# Read the parameter labels
if os.path.exists(parametersfile):
parfile = open(parametersfile)
lines=[line.strip() for line in parfile]
parlabels=[]
stypes=[]
pulsars=[]
pulsarnames=[]
for i in range(len(lines)):
lines[i]=lines[i].split()
if int(lines[i][0]) >= 0:
# If the parameter has an index
parlabels.append(lines[i][5])
stypes.append(lines[i][2])
pulsars.append(int(lines[i][1]))
if len(lines[i]) > 6:
pulsarnames.append(lines[i][6])
else:
pulsarnames.append("Pulsar {0}".format(lines[i][1]))
parfile.close()
else:
            raise IOError("No valid parameters file found!")
else:
parlabels = None
stypes = []
pulsars = []
if os.path.exists(parametersfile):
chain = np.loadtxt(chainfile)
else:
        raise IOError("No valid chain-file found!")
if psofile is not None:
mldat = np.loadtxt(psofile)
mlpso = mldat[0]
mlpsopars = mldat[1:]
else:
mlpso = None
mlpsopars = None
if sampler.lower() == 'emcee':
logpost = chain[:,1]
loglik = None
samples = chain[:,2:]
elif sampler.lower() == 'multinest':
loglik = chain[:,-1]
logpost = None
samples = chain[:,:-1]
elif sampler.lower() == 'ptmcmc':
logpost = chain[:,0]
loglik = chain[:,1]
samples = chain[:,3:]
if incextra:
retvals = (logpost, loglik, samples, parlabels, \
pulsars, pulsarnames, stypes, mlpso, mlpsopars)
else:
retvals = (logpost, loglik, samples, parlabels)
return retvals
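# Illustrative call sketch (hypothetical chain directory; not part of the original module):
# >>> logpost, loglik, samples, parlabels = ReadMCMCFile('chains/run1', sampler='auto')
# >>> samples.shape     # (n_samples, n_parameters); one column per label in parlabels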
def eptaOutput(chainfile, outputdir, burnin=0, thin=1, \
parametersfile=None, sampler='auto', make1dplots=True, \
maxpages=-1):
"""
Given an MCMC chain file, and an output directory, produce an output file
with all the results EPTA-style
@param chainfile: name of the MCMC file
@param outputdir: output directory where all the plots will be saved
@param burnin: Number of steps to be considered burn-in
@param thin: Number of steps to skip in between samples (thinning)
@param parametersfile: name of the file with the parameter labels
@param sampler: What method was used to generate the mcmc chain
(auto=autodetect). Options:('emcee', 'MultiNest',
'ptmcmc')
EPTA-style format:
[1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11]
pulsarname parname1 parname2 maxL low68% up68% low90% up90% Priortype(lin/log) minP maxP
J**** DM amp 4E5 1E-14 1E-13 1E-15 1E-12 log 1E-20 1E-10
J**** DM slope -3E0 -4E0 -2E0 -5E0 -1E0 lin -1E-10 -1E1
J**** RN amp
J**** RN slope
[J**** SingS amp
J**** SingS freq
J**** SingS phase]<----those three lines for model2
J**** EFAC backname1
J**** EFAC backname2
J**** EFAC backname3
J**** EFAC backname4
....
J**** EQUAD backname1
J**** EQUAD backname2
J**** EQUAD backname3
J**** EQUAD backname4
....
"""
# Read the mcmc chain
(llf, lpf, chainf, labels, pulsarid, pulsarname, stype, mlpso, mlpsopars) = \
ReadMCMCFile(chainfile, parametersfile=parametersfile, \
sampler=sampler, incextra=True)
# Remove burn-in and thin the chain
ll = llf[burnin::thin]
lp = lpf[burnin::thin]
chain = chainf[burnin::thin, :]
# Obtain the maximum from the chain
mlind = np.argmax(lp)
mlchain = lp[mlind]
mlchainpars = chain[mlind, :]
if mlpso is None:
ml = mlchain
mlpars = None
mlpars2 = mlchainpars
else:
ml = mlpso
mlpars = mlpsopars
mlpars2 = mlchainpars
# List all varying parameters
dopar = np.array([1]*len(labels), dtype=np.bool)
table = []
for ll, label in enumerate(labels):
fields = np.empty(11, dtype='a64')
fields[0] = pulsarname[ll]
fields[3] = '0.0'
if stype[ll] == 'efac':
fields[1] = 'EFAC'
fields[8] = 'lin'
fields[9] = '0.001'
fields[10] = '50.0'
lab = labels[ll][15:]
if len(lab) > 0:
fields[2] = labels[ll][15:]
else:
fields[2] = pulsarname[ll]
elif stype[ll] == 'equad':
fields[1] = 'EQUAD'
fields[8] = 'log'
fields[9] = '-10.0'
fields[10] = '-4.0'
lab = labels[ll][16:]
if len(lab) > 0:
fields[2] = labels[ll][16:]
else:
fields[2] = pulsarname[ll]
elif stype[ll] == 'dmpowerlaw' and \
labels[ll] == 'DM-Amplitude':
fields[1] = 'DM'
fields[2] = 'amp'
fields[8] = 'log'
fields[9] = '-14.0'
fields[10] = '-6.5'
elif stype[ll] == 'dmpowerlaw' and \
labels[ll] == 'DM-spectral-index':
fields[1] = 'DM'
fields[2] = 'slope'
fields[8] = 'lin'
fields[9] = '0.02'
fields[10] = '6.98'
elif stype[ll] == 'powerlaw' and \
labels[ll] == 'RN-Amplitude':
fields[1] = 'RN'
fields[2] = 'amp'
fields[8] = 'log'
fields[9] = '-20.0'
fields[10] = '-10.0'
elif stype[ll] == 'powerlaw' and \
labels[ll] == 'RN-spectral-index':
fields[1] = 'RN'
fields[2] = 'slope'
fields[8] = 'lin'
fields[9] = '0.02'
fields[10] = '6.98'
else:
continue
fmin, fmax = confinterval(chain[:, ll], sigmalevel=1)
fields[4] = str(fmin)
fields[5] = str(fmax)
fmin, fmax = confinterval(chain[:, ll], sigmalevel=4)
fields[6] = str(fmin)
        fields[7] = str(fmax)
table.append(fields)
table = np.array(table)
# Sort all the fields
newtable = np.empty(table.shape, dtype='a64')
newind = 0
# Place a new sorted version of all pulsars in the new table
psrs = set(table[:,0])
for psr in psrs:
psr_rows = (table[:,0] == psr)
newpsr = table[psr_rows,:].copy()
# First do the DM
dm_rows = (newpsr[:,1] == 'DM')
numpars = np.sum(dm_rows)
newtable[newind:newind+numpars,:] = newpsr[dm_rows,:]
newind += numpars
# Next, the red noise
rn_rows = (newpsr[:,1] == 'RN')
numpars = np.sum(rn_rows)
newtable[newind:newind+numpars,:] = newpsr[rn_rows,:]
newind += numpars
# Then, the EFAC (sorted)
efac_rows = (newpsr[:,1] == 'EFAC')
efacpars = newpsr[efac_rows,:].copy()
numpars = np.sum(efac_rows)
efac_ind_srt = np.argsort(efacpars[:,2])
newtable[newind:newind+numpars,:] = efacpars[efac_ind_srt,:]
newind += numpars
# Finally, the EQUAD (sorted)
efac_rows = newpsr[:,1] == 'EQUAD'
efacpars = newpsr[efac_rows,:].copy()
numpars = np.sum(efac_rows)
efac_ind_srt = np.argsort(efacpars[:,2])
newtable[newind:newind+numpars,:] = efacpars[efac_ind_srt,:]
newind += numpars
eptafilename = outputdir + '/eptafile.txt'
eptafile = open(eptafilename, 'w')
for ii in range(newtable.shape[0]):
eptafile.write('\t'.join(['{0}'.format(newtable[ii,jj]) \
for jj in range(newtable.shape[1])]))
eptafile.write('\n')
eptafile.close()
return newtable
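# Illustrative call sketch (hypothetical paths and settings; not part of the original module):
# >>> table = eptaOutput('chains/run1', 'epta-output', burnin=5000, thin=10)
# >>> # writes 'epta-output/eptafile.txt' and returns the sorted table of
# >>> # pulsar / parameter / ML / 68% / 90% / prior fields as an array of strings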
| gpl-3.0 |
pnedunuri/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
    if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
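# Illustrative usage sketch (added for clarity; not part of the original module):
# draw a sparse (n_samples x n_outputs) label matrix with a skewed per-column
# class distribution.
# >>> classes = [np.array([0, 1, 2]), np.array([0, 3])]
# >>> probs = [np.array([0.5, 0.3, 0.2]), np.array([0.9, 0.1])]
# >>> mat = random_choice_csc(10, classes, class_probability=probs, random_state=0)
# >>> mat.toarray().shape
# (10, 2)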
| bsd-3-clause |
joernhees/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
amyshi188/osf.io | scripts/annotate_rsvps.py | 60 | 2256 | """Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
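    # AND together case-insensitive "contains" filters, one per name part,
    # preferring the most recently created matching account.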
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
| apache-2.0 |
samuelleblanc/flight_planning | moving_lines_v2.py | 1 | 6917 | from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import Tkinter as tk
import numpy as np
from mpl_toolkits.basemap import Basemap
import datetime
import map_utils as mu
import excel_interface as ex
import map_interactive as mi
import gui
version = 'v0.5beta'
def Create_gui(vertical=True):
'Program to set up gui interaction with figure embedded'
class ui:
pass
ui = ui
ui.root = tk.Tk()
ui.root.wm_title('Flight planning by Samuel LeBlanc, NASA Ames, '+version)
ui.root.geometry('900x950')
ui.top = tk.Frame(ui.root)
ui.bot = tk.Frame(ui.root)
if vertical:
ui.top.pack(side=tk.LEFT,expand=False)
ui.bot.pack(side=tk.RIGHT,fill=tk.BOTH,expand=True)
else:
ui.top.pack(side=tk.TOP,expand=False)
ui.bot.pack(side=tk.BOTTOM,fill=tk.BOTH,expand=True)
ui.fig = Figure()
ui.ax1 = ui.fig.add_subplot(111)
ui.canvas = FigureCanvasTkAgg(ui.fig,master=ui.root)
ui.canvas.show()
ui.canvas.get_tk_widget().pack(in_=ui.bot,side=tk.BOTTOM,fill=tk.BOTH,expand=1)
ui.tb = NavigationToolbar2TkAgg(ui.canvas,ui.root)
ui.tb.pack(in_=ui.bot,side=tk.BOTTOM)
ui.tb.update()
ui.canvas._tkcanvas.pack(in_=ui.bot,side=tk.TOP,fill=tk.BOTH,expand=1)
return ui
def build_buttons(ui,lines,vertical=True):
'Program to set up the buttons'
import gui
import Tkinter as tk
from matplotlib.colors import cnames
if vertical:
side = tk.TOP
h = 2
w = 20
else:
side = tk.LEFT
h = 20
w = 2
g = gui.gui(lines,root=ui.root,noplt=True)
g.refresh = tk.Button(g.root,text='Refresh',
command=g.refresh,
bg='chartreuse')
g.bopenfile = tk.Button(g.root,text='Open Excel file',
command=g.gui_open_xl)
g.bsavexl = tk.Button(g.root,text='Save Excel file',
command=g.gui_save_xl)
g.bsavetxt = tk.Button(g.root,text='Save text file',
command=g.gui_save_txt)
g.bsaveas2kml = tk.Button(g.root,text='SaveAs to Kml',
command=g.gui_saveas2kml)
g.bsave2kml = tk.Button(g.root,text='Update Kml',
command=g.gui_save2kml)
g.bsave2gpx = tk.Button(g.root,text='Save to GPX',
command=g.gui_save2gpx)
g.refresh.pack(in_=ui.top,side=side,fill=tk.X,pady=8)
g.bopenfile.pack(in_=ui.top,side=side)
g.bsavexl.pack(in_=ui.top,side=side)
g.bsavetxt.pack(in_=ui.top,side=side)
g.bsaveas2kml.pack(in_=ui.top,side=side)
g.bsave2kml.pack(in_=ui.top,side=side)
g.bsave2gpx.pack(in_=ui.top,side=side)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
g.bplotalt = tk.Button(g.root,text='Plot alt vs time',
command=g.gui_plotalttime)
g.bplotalt.pack(in_=ui.top,side=side)
g.bplotsza = tk.Button(g.root,text='Plot SZA',
command=g.gui_plotsza)
g.bplotsza.pack(in_=ui.top,side=side)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
g.frame_select = tk.Frame(g.root,relief=tk.SUNKEN,bg='white')
g.frame_select.pack(in_=ui.top,side=side,fill=tk.BOTH)
g.flightselect_arr = []
g.flightselect_arr.append(tk.Radiobutton(g.root,text=lines.ex.name,
fg=lines.ex.color,
variable=g.iactive,value=0,
indicatoron=0,
command=g.gui_changeflight,
state=tk.ACTIVE))
g.flightselect_arr[0].pack(in_=g.frame_select,side=side,padx=4,pady=2,fill=tk.BOTH)
g.flightselect_arr[0].select()
g.newflightpath = tk.Button(g.root,text='New flight path',
command = g.gui_newflight)
g.newflightpath.pack(in_=ui.top,padx=5,pady=5)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
g.baddsat = tk.Button(g.root,text='Add Satellite tracks',
command = g.gui_addsat)
g.baddsat.pack(in_=ui.top)
g.baddbocachica = tk.Button(g.root,text='Add Forecast\nfrom Bocachica',
command = g.gui_addbocachica)
g.baddbocachica.pack(in_=ui.top)
g.baddfigure = tk.Button(g.root,text='Add Forecast\nfrom image',
command = g.gui_addfigure)
g.baddfigure.pack(in_=ui.top)
tk.Frame(g.root,height=h,width=w,bg='black',relief='sunken'
).pack(in_=ui.top,side=side,padx=8,pady=5)
tk.Button(g.root,text='Quit',command=g.stopandquit,bg='lightcoral'
).pack(in_=ui.top,side=side)
ui.g = g
def get_datestr(ui):
import tkSimpleDialog
from datetime import datetime
import re
ui.datestr = tkSimpleDialog.askstring('Flight Date','Flight Date (yyyy-mm-dd):')
if not ui.datestr:
ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
else:
while not re.match('[0-9]{4}-[0-9]{2}-[0-9]{2}',ui.datestr):
ui.datestr = tkSimpleDialog.askstring('Flight Date',
'Bad format, please retry!\nFlight Date (yyyy-mm-dd):')
if not ui.datestr:
ui.datestr = datetime.utcnow().strftime('%Y-%m-%d')
ui.ax1.set_title(ui.datestr)
def savetmp(ui,wb):
import tempfile, os
tmpfilename = os.path.join(tempfile.gettempdir(),ui.datestr+'.xlsx')
try:
wb.save2xl(tmpfilename)
except:
print 'unable to save excel to temp file:'+tmpfilename
print 'continuing ...'
def init_plot(m,color='red'):
lat0,lon0 = mi.pll('22 58.783S'), mi.pll('14 38.717E')
x0,y0 = m(lon0,lat0)
line, = m.plot([x0],[y0],'o-',color=color,linewidth=3)
text = ('Press s to stop interaction\\n'
'Press i to restart interaction\\n')
return line
def Create_interaction(test=False,**kwargs):
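    'Build the GUI and basemap, load labels, then wire the Excel-backed flight path to the interactive line'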
ui = Create_gui()
m = mi.build_basemap(ax=ui.ax1)
line = init_plot(m,color='red')
flabels = 'labels.txt'
faero = 'aeronet_locations.txt'
try:
mi.plot_map_labels(m,flabels)
mi.plot_map_labels(m,faero,marker='*',skip_lines=2,color='y')
except:
print 'Label files not found!'
get_datestr(ui)
wb = ex.dict_position(datestr=ui.datestr,color=line.get_color(),**kwargs)
lines = mi.LineBuilder(line,m=m,ex=wb,tb=ui.tb)
savetmp(ui,wb)
build_buttons(ui,lines)
if not test:
ui.root.mainloop()
return lines,ui
if __name__ == "__main__":
lines,ui = Create_interaction(test=True)
| gpl-3.0 |
hsaputra/tensorflow | tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py | 10 | 12183 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, noise.shape[1].value)
def discriminator_fn(data, _):
return layers.fully_connected(data, 1)
def mock_head(testcase, expected_generator_inputs, expected_real_data,
generator_scope_name):
"""Returns a mock head that validates logits values and variable names."""
discriminator_scope_name = 'Discriminator' # comes from TFGAN defaults
generator_var_names = set([
'%s/fully_connected/weights:0' % generator_scope_name,
'%s/fully_connected/biases:0' % generator_scope_name])
discriminator_var_names = set([
'%s/fully_connected/weights:0' % discriminator_scope_name,
'%s/fully_connected/biases:0' % discriminator_scope_name])
def _create_estimator_spec(features, mode, logits, labels):
gan_model = logits # renaming for clarity
is_predict = mode == model_fn_lib.ModeKeys.PREDICT
testcase.assertIsNone(features)
testcase.assertIsNone(labels)
testcase.assertIsInstance(gan_model, namedtuples.GANModel)
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
expected_var_names = (generator_var_names if is_predict else
generator_var_names | discriminator_var_names)
testcase.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
assertions = []
def _or_none(x):
return None if is_predict else x
testcase.assertEqual(expected_generator_inputs, gan_model.generator_inputs)
# TODO(joelshor): Add check on `generated_data`.
testcase.assertItemsEqual(
generator_var_names,
set([x.name for x in gan_model.generator_variables]))
testcase.assertEqual(generator_scope_name, gan_model.generator_scope.name)
testcase.assertEqual(_or_none(expected_real_data), gan_model.real_data)
# TODO(joelshor): Add check on `discriminator_real_outputs`.
# TODO(joelshor): Add check on `discriminator_gen_outputs`.
if is_predict:
testcase.assertIsNone(gan_model.discriminator_scope)
else:
testcase.assertEqual(discriminator_scope_name,
gan_model.discriminator_scope.name)
testcase.assertEqual(_or_none(discriminator_fn), gan_model.discriminator_fn)
with ops.control_dependencies(assertions):
if mode == model_fn_lib.ModeKeys.TRAIN:
return model_fn_lib.EstimatorSpec(
mode=mode, loss=array_ops.zeros([]),
train_op=control_flow_ops.no_op(), training_hooks=[])
elif mode == model_fn_lib.ModeKeys.EVAL:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data,
loss=array_ops.zeros([]))
elif mode == model_fn_lib.ModeKeys.PREDICT:
return model_fn_lib.EstimatorSpec(
mode=mode, predictions=gan_model.generated_data)
else:
testcase.fail('Invalid mode: {}'.format(mode))
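  # Wrap the real spec builder with MagicMock(wraps=...) so the test records the
  # call while still executing the validation assertions above.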
head = test.mock.NonCallableMagicMock(spec=head_lib._Head)
head.create_estimator_spec = test.mock.MagicMock(
wraps=_create_estimator_spec)
return head
class GANModelFnTest(test.TestCase):
"""Tests that _gan_model_fn passes expected logits to mock head."""
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_logits_helper(self, mode):
"""Tests that the expected logits are passed to mock head."""
with ops.Graph().as_default():
training_util.get_or_create_global_step()
generator_inputs = {'x': array_ops.zeros([5, 4])}
real_data = (None if mode == model_fn_lib.ModeKeys.PREDICT else
array_ops.zeros([5, 4]))
generator_scope_name = 'generator'
head = mock_head(self,
expected_generator_inputs=generator_inputs,
expected_real_data=real_data,
generator_scope_name=generator_scope_name)
estimator_spec = estimator._gan_model_fn(
features=generator_inputs,
labels=real_data,
mode=mode,
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_scope_name=generator_scope_name,
head=head)
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=self._model_dir) as sess:
if mode == model_fn_lib.ModeKeys.TRAIN:
sess.run(estimator_spec.train_op)
elif mode == model_fn_lib.ModeKeys.EVAL:
sess.run(estimator_spec.loss)
elif mode == model_fn_lib.ModeKeys.PREDICT:
sess.run(estimator_spec.predictions)
else:
self.fail('Invalid mode: {}'.format(mode))
def test_logits_predict(self):
self._test_logits_helper(model_fn_lib.ModeKeys.PREDICT)
def test_logits_eval(self):
self._test_logits_helper(model_fn_lib.ModeKeys.EVAL)
def test_logits_train(self):
self._test_logits_helper(model_fn_lib.ModeKeys.TRAIN)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
if __name__ == '__main__':
test.main()
| apache-2.0 |
Eric89GXL/vispy | vispy/util/check_environment.py | 2 | 1779 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import os
from distutils.version import LooseVersion
from vispy.util import use_log_level
def has_matplotlib(version='1.2'):
"""Determine if mpl is a usable version"""
try:
import matplotlib
except Exception:
has_mpl = False
else:
if LooseVersion(matplotlib.__version__) >= LooseVersion(version):
has_mpl = True
else:
has_mpl = False
return has_mpl
def has_skimage(version='0.11'):
"""Determine if scikit-image is a usable version"""
try:
import skimage
except ImportError:
return False
sk_version = LooseVersion(skimage.__version__)
return sk_version >= LooseVersion(version)
def has_backend(backend, has=(), capable=(), out=()):
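    """Determine if the given vispy app backend is importable and usable"""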
from ..app.backends import BACKENDMAP
using = os.getenv('_VISPY_TESTING_APP', None)
if using is not None and using != backend:
# e.g., we are on a 'pyglet' run but the test requires PyQt4
ret = (False,) if len(out) > 0 else False
for o in out:
ret += (None,)
return ret
# let's follow the standard code path
module_name = BACKENDMAP[backend.lower()][1]
with use_log_level('warning', print_msg=False):
mod = __import__('app.backends.%s' % module_name, globals(), level=2)
mod = getattr(mod.backends, module_name)
good = mod.testable
for h in has:
good = (good and getattr(mod, 'has_%s' % h))
for cap in capable:
good = (good and mod.capability[cap])
ret = (good,) if len(out) > 0 else good
for o in out:
ret += (getattr(mod, o),)
return ret
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e443.py | 2 | 18533 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 5000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 1024
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['washer dryer', 'washing machine'],
'hair straighteners',
'television',
'dish washer',
['fridge freezer', 'fridge', 'freezer']
],
max_appliance_powers=[2400, 500, 200, 2500, 200],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[1800, 60, 60, 1800, 60],
min_off_durations=[600, 12, 12, 1800, 12],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.75,
skip_probability_for_first_appliance=0.2,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
2000: 1e-3,
10000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
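# Experiments a-g below differ only in their convolutional front end (exp_b has
# none; the others vary the number of Conv1D layers and the filter counts and
# lengths). All share the same dense autoencoder body with a 32-unit bottleneck.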
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 32,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 2,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 8,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 8,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{ # MIDDLE LAYER
'type': DenseLayer,
'num_units': 32,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('defg')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=20000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e443.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
ellisdg/3DUnetCNN | unet3d/scripts/train.py | 1 | 10228 | import os
import argparse
import pandas as pd
import numpy as np
from unet3d.train import run_training
from unet3d.utils.filenames import wrapped_partial, generate_filenames, load_bias, load_sequence
from unet3d.utils.sequences import (WholeVolumeToSurfaceSequence, HCPRegressionSequence, ParcelBasedSequence,
WindowedAutoEncoderSequence)
from unet3d.utils.pytorch.dataset import (WholeBrainCIFTI2DenseScalarDataset, HCPRegressionDataset, AEDataset,
WholeVolumeSegmentationDataset, WindowedAEDataset)
from unet3d.utils.utils import load_json, in_config, dump_json
from unet3d.utils.custom import get_metric_data_from_config
from unet3d.models.keras.resnet.resnet import compare_scores
from unet3d.scripts.predict import format_parser as format_prediction_args
from unet3d.scripts.predict import run_inference
from unet3d.scripts.script_utils import get_machine_config, add_machine_config_to_parser
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--config_filename", required=True,
help="JSON configuration file specifying the parameters for model training.")
parser.add_argument("--model_filename",
help="Location to save the model during and after training. If this filename exists "
"prior to training, the model will be loaded from the filename.",
required=True)
parser.add_argument("--training_log_filename",
                        help="CSV filename to save the training and validation results for each epoch.",
required=True)
parser.add_argument("--fit_gpu_mem", type=float,
help="Specify the amount of gpu memory available on a single gpu and change the image size to "
"fit into gpu memory automatically. Will try to find the largest image size that will fit "
"onto a single gpu. The batch size is overwritten and set to the number of gpus available."
" The new image size will be written to a new config file ending named "
"'<original_config>_auto.json'. This option is experimental and only works with the UNet "
"model. It has only been tested with gpus that have 12GB and 32GB of memory.")
parser.add_argument("--group_average_filenames")
add_machine_config_to_parser(parser)
subparsers = parser.add_subparsers(help="sub-commands", dest='sub_command')
prediction_parser = subparsers.add_parser(name="predict",
help="Run prediction after the model has finished training")
format_prediction_args(prediction_parser, sub_command=True)
args = parser.parse_args()
return args
def check_hierarchy(config):
if in_config("labels", config["sequence_kwargs"]) and in_config("use_label_hierarchy", config["sequence_kwargs"]):
config["sequence_kwargs"].pop("use_label_hierarchy")
labels = config["sequence_kwargs"].pop("labels")
new_labels = list()
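        # Expand the flat label list into a hierarchy: entry k keeps label k plus
        # every later label (presumably so coarser targets include finer structures).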
while len(labels):
new_labels.append(labels)
labels = labels[1:]
config["sequence_kwargs"]["labels"] = new_labels
def compute_unet_number_of_voxels(window, channels, n_layers):
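    # Rough voxel-count estimate for a UNet: level i has 1/2**(3*i) of the spatial
    # voxels and channels * 2**i feature channels; the trailing factor of 2
    # presumably accounts for the paired feature maps held at each level.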
n_voxels = 0
for i in range(n_layers):
n_voxels = n_voxels + ((1/(2**(3*i))) * window[0] * window[1] * window[2] * channels * 2**i * 2)
return n_voxels
def compute_window_size(step, step_size, ratios):
step_ratios = np.asarray(ratios) * step * step_size
mod = np.mod(step_ratios, step_size)
return np.asarray(step_ratios - mod + np.round(mod / step_size) * step_size, dtype=int)
def update_config_to_fit_gpu_memory(config, n_gpus, gpu_memory, output_filename, voxels_per_gb=17000000.0,
ratios=(1.22, 1.56, 1.0)):
max_voxels = voxels_per_gb * gpu_memory
n_layers = len(config["model_kwargs"]["encoder_blocks"])
step_size = 2**(n_layers - 1)
step = 1
window = compute_window_size(step, step_size, ratios)
n_voxels = compute_unet_number_of_voxels(window, config["model_kwargs"]["base_width"], n_layers)
while n_voxels <= max_voxels:
step = step + 1
window = compute_window_size(step, step_size, ratios)
n_voxels = compute_unet_number_of_voxels(window, config["model_kwargs"]["base_width"], n_layers)
window = compute_window_size(step - 1, step_size, ratios).tolist()
print("Setting window size to {} x {} x {}".format(*window))
print("Setting batch size to", n_gpus)
config["window"] = window
config["model_kwargs"]["input_shape"] = window
config["batch_size"] = n_gpus
config["validation_batch_size"] = n_gpus
print("Writing new configuration file:", output_filename)
dump_json(config, output_filename)
def main():
import nibabel as nib
nib.imageglobals.logger.level = 40
namespace = parse_args()
print("Config: ", namespace.config_filename)
config = load_json(namespace.config_filename)
if "package" in config:
package = config["package"]
else:
package = "keras"
if "metric_names" in config and not config["n_outputs"] == len(config["metric_names"]):
raise ValueError("n_outputs set to {}, but number of metrics is {}.".format(config["n_outputs"],
len(config["metric_names"])))
print("Model: ", namespace.model_filename)
print("Log: ", namespace.training_log_filename)
system_config = get_machine_config(namespace)
if namespace.fit_gpu_mem and namespace.fit_gpu_mem > 0:
update_config_to_fit_gpu_memory(config=config, n_gpus=system_config["n_gpus"], gpu_memory=namespace.fit_gpu_mem,
output_filename=namespace.config_filename.replace(".json", "_auto.json"))
if namespace.group_average_filenames is not None:
group_average = get_metric_data_from_config(namespace.group_average_filenames, namespace.config_filename)
model_metrics = [wrapped_partial(compare_scores, comparison=group_average)]
metric_to_monitor = "compare_scores"
else:
model_metrics = []
if config['skip_validation']:
metric_to_monitor = "loss"
else:
metric_to_monitor = "val_loss"
if config["skip_validation"]:
groups = ("training",)
else:
groups = ("training", "validation")
for name in groups:
key = name + "_filenames"
if key not in config:
config[key] = generate_filenames(config, name, system_config)
if "directory" in system_config:
directory = system_config.pop("directory")
else:
directory = "."
if "sequence" in config:
sequence_class = load_sequence(config["sequence"])
elif "_wb_" in os.path.basename(namespace.config_filename):
if "package" in config and config["package"] == "pytorch":
if config["sequence"] == "AEDataset":
sequence_class = AEDataset
elif config["sequence"] == "WholeVolumeSegmentationDataset":
sequence_class = WholeVolumeSegmentationDataset
else:
sequence_class = WholeBrainCIFTI2DenseScalarDataset
else:
sequence_class = WholeVolumeToSurfaceSequence
elif config["sequence"] == "WindowedAutoEncoderSequence":
sequence_class = WindowedAutoEncoderSequence
elif config["sequence"] == "WindowedAEDataset":
sequence_class = WindowedAEDataset
elif "_pb_" in os.path.basename(namespace.config_filename):
sequence_class = ParcelBasedSequence
config["sequence_kwargs"]["parcellation_template"] = os.path.join(
directory, config["sequence_kwargs"]["parcellation_template"])
else:
if config["package"] == "pytorch":
sequence_class = HCPRegressionDataset
else:
sequence_class = HCPRegressionSequence
if "bias_filename" in config and config["bias_filename"] is not None:
bias = load_bias(config["bias_filename"])
else:
bias = None
check_hierarchy(config)
if in_config("add_contours", config["sequence_kwargs"], False):
config["n_outputs"] = config["n_outputs"] * 2
if sequence_class == ParcelBasedSequence:
target_parcels = config["sequence_kwargs"].pop("target_parcels")
for target_parcel in target_parcels:
config["sequence_kwargs"]["target_parcel"] = target_parcel
print("Training on parcel: {}".format(target_parcel))
if type(target_parcel) == list:
parcel_id = "-".join([str(i) for i in target_parcel])
else:
parcel_id = str(target_parcel)
_training_log_filename = namespace.training_log_filename.replace(".csv", "_{}.csv".format(parcel_id))
if os.path.exists(_training_log_filename):
_training_log = pd.read_csv(_training_log_filename)
if (_training_log[metric_to_monitor].values.argmin()
<= len(_training_log) - int(config["early_stopping_patience"])):
print("Already trained")
continue
run_training(package,
config,
namespace.model_filename.replace(".h5", "_{}.h5".format(parcel_id)),
_training_log_filename,
sequence_class=sequence_class,
model_metrics=model_metrics,
metric_to_monitor=metric_to_monitor,
**system_config)
else:
run_training(package, config, namespace.model_filename, namespace.training_log_filename,
sequence_class=sequence_class,
model_metrics=model_metrics, metric_to_monitor=metric_to_monitor, bias=bias, **system_config)
if namespace.sub_command == "predict":
run_inference(namespace)
if __name__ == '__main__':
main()
| mit |
erikness/AlephOne | zipline/sources/data_frame_source.py | 2 | 4639 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import pandas as pd
from zipline.gens.utils import hash_args
from zipline.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
    Yields a price/volume event for every (timestamp, sid) cell of the given
    DataFrame (DatetimeIndex rows, one column per sid), restricted to the
    configured sids.
    Configuration options:
    sids  : list of sids to emit (defaults to the DataFrame's columns)
    start : start date (defaults to the first timestamp in the index)
    end   : end date (defaults to the last timestamp in the index)
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.columns)
self.start = kwargs.get('start', data.index[0])
self.end = kwargs.get('end', data.index[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
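        # Maps each output field to a (converter, raw event key) pair; the
        # converter is applied by the DataSource machinery when raw events
        # are coerced downstream.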
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iteritems():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
'price': price,
'volume': 1000,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
class DataPanelSource(DataSource):
"""
    Yields an event for every (timestamp, sid) pair of the given Panel
    (items are sids, major_axis is a DatetimeIndex), carrying every field
    found on the minor_axis, restricted to the configured sids.
    Configuration options:
    sids  : list of sids to emit (defaults to the Panel's items)
    start : start date (defaults to the first timestamp on major_axis)
    end   : end date (defaults to the last timestamp on major_axis)
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.items)
self.start = kwargs.get('start', data.major_axis[0])
self.end = kwargs.get('end', data.major_axis[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iteritems():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
| apache-2.0 |
mikeireland/pynrm | pynrm/rl_deconv.py | 1 | 7547 | """This makes a simple Richardson-Lucy deconvolution on a cleaned data cube, with
some reference calibrator images. Input data have to be neatly packaged in a single
data cube.
To make a "good_ims.fits" file, run "choose_psfs.py" after cleaning the data
(e.g. with process_block called in a script go.py or run_clean)."""
from __future__ import print_function, division
import astropy.io.fits as pyfits
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pdb
import aplpy
import opticstools as ot
plt.ion()
def rl_deconv(tgt_fn=None, cal_fn=None, good_ims_fn=None, niter=50):
"""Deconvolve a target using RL deconvolution and either a pair of target and
calibrator cubes, or a set of manually selected "good" files
Parameters
----------
tgt_fn: string
Name of the target cube file
cal_fn: string
Name of the calibrator cube file
good_ims_fn: string
Name of the "good_ims" filename, from choose_psfs.
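    niter: int
        Number of Richardson-Lucy iterations for each deconvolution.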
"""
if good_ims_fn is None:
header = pyfits.getheader(tgt_fn)
radec = [header['RA'],header['DEC']]
pa = np.mean(pyfits.getdata(tgt_fn,1)['pa'])
tgt_ims = pyfits.getdata(tgt_fn)
cal_ims = pyfits.getdata(cal_fn)
else:
header = pyfits.getheader(good_ims_fn)
radec = [header['RA'],header['DEC']]
pas = pyfits.getdata(good_ims_fn,2)['pa']
#Check for too much sky rotation.
pa_diffs = pas - pas[0]
pa_diffs = ((pa_diffs + 180) % 360) - 180
if np.max(np.abs(pa_diffs)) > 30:
raise UserWarning("Too much sky rotation! Re-write code or reduce number of files.")
#Average the pas modulo 360
pa = pas[0] + np.mean(pa_diffs)
tgt_ims = pyfits.getdata(good_ims_fn, 0)
cal_ims = pyfits.getdata(good_ims_fn, 1)
subtract_median=True
sz = tgt_ims.shape[1]
best_models = np.zeros( tgt_ims.shape )
best_rms = np.zeros( tgt_ims.shape[0] )
if subtract_median:
for i in range(len(cal_ims)):
for j in range(len(cal_ims[i])):
cal_ims[i][j] -= np.median(cal_ims[i])
for i in range(len(tgt_ims)):
for j in range(len(tgt_ims[i])):
tgt_ims[i][j] -= np.median(tgt_ims[i])
#Loop through all target images, and make the best deconvolution possible for each image.
for i in range(tgt_ims.shape[0]):
print("Working on image {0:d}".format(i))
#Create a blank model image
model_ims = np.zeros( cal_ims.shape )
#Create a blank array of RMS of the model fits to the data
rms = np.zeros( cal_ims.shape[0] )
#Extract the data image from the cube and normalise it
data = tgt_ims[i,:,:]
data /= np.sum(data)
#In order for RL deconvolution to work, we need to put in a background offset for
#flux. We'll subtract this at the end.
data += 1.0/data.size
#Find the peak pixel in the data.
max_ix_data = np.argmax(data)
max_ix_data = np.unravel_index(max_ix_data,data.shape)
#Try to deconvolve with each calibrator image one at a time.
for j in range(cal_ims.shape[0]):
#Extract and normalise the Point-Spread Function
psf = cal_ims[j,:,:]
psf /= np.sum(psf)
#Find the maximum pixel for the PSF, and roll the PSF so that (0,0) is the
#peak pixel.
max_ix = np.argmax(psf)
max_ix = np.unravel_index(max_ix, psf.shape)
psf = np.roll(np.roll(psf, -max_ix[0], axis=0), -max_ix[1],axis=1)
#To save computational time, pre-compute the Fourier transform of the PSF
psf_ft = np.fft.rfft2(psf)
#The initial model just has a "star" at the location of the data maximum
model = np.zeros(data.shape)
model += 1.0/data.size
model[max_ix_data] = 1.0
            #Do the Richardson-Lucy iterations (Richardson 1972; Lucy 1974).
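            #Multiplicative RL update: u <- u * correlate(data / convolve(u, psf), psf),
            #done in Fourier space (multiplying by np.conj(psf_ft) is the correlation).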
for k in range(niter):
# u (convolved) p is our model of the data. Compute this first.
model_convolved = np.fft.irfft2(np.fft.rfft2(model)*psf_ft)
# Update the model according to the RL algorithm
model *= np.fft.irfft2(np.fft.rfft2(data / model_convolved)*np.conj(psf_ft))
model_convolved = np.fft.irfft2(np.fft.rfft2(model)*psf_ft)
#Record the RMS difference between the model and the data.
rms[j] = np.sqrt(np.mean( (model_convolved - data)**2)) * data.size
#Subtract off our offset.
model -= 1.0/data.size
#Shift the final model to the middle, so we can add together target images on
#different pixel coordinates
model_ims[j,:,:] = np.roll(np.roll(model,sz//2-max_ix_data[0], axis=0), sz//2-max_ix_data[1], axis=1)
#Only use the calibrator with the best RMS. i.e. we assume this is the best PSF for our data.
best_cal = np.argmin(rms)
best_models[i,:,:] = model_ims[best_cal,:,:]
best_rms[i] = rms[best_cal]
ptsrc_fluxes = best_models[:,sz//2,sz//2].copy()
#set the central pixel to zero.
best_models[:,sz//2,sz//2]=0
final_image = np.mean(best_models,axis=0)
image = final_image/np.max(final_image)
image_sub = image - np.roll(np.roll(image[::-1,::-1],1,axis=0),1,axis=1)
image[sz//2,sz//2]=1.0
image_sub[sz//2,sz//2]=1.0
plt.imshow(np.arcsinh(image/0.1), interpolation='nearest', cmap=cm.cubehelix)
plt.plot(sz//2,sz//2, 'r*', markersize=20)
tic_min = np.min(image)
tic_max = np.max(image)
tics = np.arcsinh(tic_min/0.1) + np.arange(8)/7.0*(np.arcsinh(tic_max/0.1) - np.arcsinh(tic_min/0.1))
tics = np.sinh(tics)*0.1
hdu = pyfits.PrimaryHDU(image)
costerm = np.cos(np.radians(pa))*0.01/3600.
sinterm = np.sin(np.radians(pa))*0.01/3600.
hdu.header['CRVAL1']=radec[0]
hdu.header['CRVAL2']=radec[1]
hdu.header['CTYPE1']='RA---TAN'
hdu.header['CTYPE2']='DEC--TAN'
hdu.header['CRPIX1']=sz//2
hdu.header['CRPIX2']=sz//2
hdu.header['CD1_1']=-costerm
hdu.header['CD2_2']=costerm
hdu.header['CD1_2']=sinterm
hdu.header['CD2_1']=sinterm
#hdu.header['RADECSYS']='FK5'
hdulist = pyfits.HDUList([hdu])
hdu.data = image
hdulist.writeto('deconv_image.fits', clobber=True)
fig = aplpy.FITSFigure('deconv_image.fits')
fig.show_colorscale(cmap=cm.cubehelix, stretch='arcsinh',vmax=1, vmid=0.05)
fig.add_colorbar()
fig.add_grid()
hdu.data=image_sub
hdulist.writeto('deconv_image_sub.fits', clobber=True)
fig2 = aplpy.FITSFigure('deconv_image_sub.fits')
fig2.show_colorscale(cmap=cm.cubehelix, stretch='arcsinh',vmax=1, vmid=0.05)
fig2.add_colorbar()
fig2.add_grid()
fig3 = aplpy.FITSFigure('deconv_image.fits')
fig3.show_colorscale(cmap=cm.cubehelix, stretch='linear',vmax=1, vmin=0.0)
fig3.add_colorbar()
fig3.add_grid()
plt.figure(1)
plt.clf()
rr, ii = ot.azimuthalAverage(image,returnradii=True,center=[64,64],binsize=0.7)
plt.plot(rr*0.01,ii)
plt.axis([0,.3,-0.05,0.8])
plt.xlabel('Radius (arcsec)')
plt.ylabel('Azi. Ave. Intensity (rel. to disk peak)')
plt.plot([0.11,0.11],[-0.1,1],'r')
plt.plot([0.17,0.17],[-0.1,1],'r')
plt.annotate("Companion Radius", [0.11,0.6],[0.18,0.6],arrowprops={"arrowstyle":"->"})
plt.annotate("Wall Radius", [0.17,0.3],[0.2,0.3],arrowprops={"arrowstyle":"->"})
| mit |
simonsfoundation/CaImAn | use_cases/eLife_scripts/figure_8/Figure_8d_1p_sim.py | 2 | 4705 | # -*- coding: utf-8 -*-
"""
This script reproduces the results for Figure 8d, timing information for
analyzing 1p data using CaImAn. The script tries several combinations of
number of processes and patch size and plots the required processing time
vs the overall memory usage. As such it takes a couple of hours to run.
More info can be found in the companion paper
"""
import caiman as cm
from caiman.source_extraction import cnmf
import pickle
from memory_profiler import memory_usage
from multiprocessing import Pool, cpu_count
import numpy as np
from time import time, sleep
import matplotlib.pyplot as plt
from scipy.io import loadmat
import os
# set MKL_NUM_THREADS and OPENBLAS_NUM_THREADS to 1 outside via export!
# takes about 170 mins for all runs
base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/WEBSITE/'
fname = os.path.join(base_folder,'test_sim.mat')
dims = (253, 316)
Yr = loadmat(fname)['Y']
Y = Yr.T.reshape((-1,) + dims, order='F')
cm.save_memmap([Y], base_name='Yr', order='C')
def main(n_processes=None, patches=True, rf=64):
t = -time()
Yr, dims, T = cm.load_memmap(os.path.abspath('./Yr_d1_253_d2_316_d3_1_order_C_frames_2000_.mmap'))
Y = Yr.T.reshape((T,) + dims, order='F')
# c, dview, n_processes = cm.cluster.setup_cluster(backend='local', n_processes=n_processes)
# above line doesn't work cause memory_profiler creates some multiprocessing object itself
if n_processes is None:
n_processes = cpu_count()
dview = Pool(n_processes) if patches else None
print('{0} processes'.format(n_processes))
patch_args = dict(nb_patch=0, del_duplicates=True, rf=(rf, rf), stride=(16, 16)) \
if patches else {}
cnm = cnmf.CNMF(n_processes=n_processes, method_init='corr_pnr', k=None, dview=dview,
gSig=(3, 3), gSiz=(10, 10), merge_thresh=.8, p=1, tsub=2, ssub=1,
only_init_patch=True, gnb=0, min_corr=.9, min_pnr=15, normalize_init=False,
ring_size_factor=1.5, center_psf=True, ssub_B=2, init_iter=1, **patch_args)
cnm.fit(Y)
if patches:
dview.terminate()
t += time()
sleep(1) # just in case Pool takes some time to terminate
return t
try:
dview.terminate()
except:
pass
results = {'32': {}, '48': {}, '64': {}, 'noPatches': {}}
n_procs = [1, 2, 4, 6, 8, 12, 16, 24]
runs = 5
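# Benchmark grid: patch half-sizes rf in {32, 48, 64} crossed with the process
# counts above, 5 runs each. memory_usage(..., retval=True) returns the memory
# trace together with the wall-clock time that main() returns.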
for n_proc in n_procs:
for rf in [32, 48, 64]:
results[str(rf)]['%dprocess' % n_proc] = [memory_usage(
proc=lambda: main(n_processes=n_proc, rf=rf), include_children=True, retval=True)
for run in range(runs)]
results['noPatches'] = [memory_usage(
proc=lambda: main(patches=False), include_children=True, retval=True)
for run in range(runs)]
with open('memory.pkl', 'wb') as fp: # save results
pickle.dump(results, fp)
#%% PLOT RESULTS
"""change some defaults for plotting"""
plt.rc('figure', facecolor='white', frameon=False)
plt.rc('lines', lw=2)
plt.rc('legend', **{'fontsize': 16, 'frameon': False, 'labelspacing': .3, 'handletextpad': .3})
plt.rc('axes', linewidth=2)
plt.rc('xtick.major', size=10, width=1.5)
plt.rc('ytick.major', size=10, width=1.5)
plt.rc('font', **{'family': 'Myriad Pro', 'weight': 'regular', 'size': 24})
plt.rcParams['pdf.fonttype'] = 42
def get_max_mem(rf='64'):
patch = []
for proc in n_procs:
tmp = results[rf]['%dprocess' % proc]
t = np.array(list(map(lambda a: a[1], tmp)))
m = np.array(list(map(lambda a: max(a[0]), tmp)))
patch.append([t, m])
return np.transpose(patch)
patch = {}
for rf in ('64', '48', '32'):
patch[rf] = get_max_mem(rf)
nopatch = np.array([list(map(lambda a: a[1], results['noPatches'])),
list(map(lambda a: max(a[0]), results['noPatches']))])
max_time = max([patch[rf][:, 0].max() for rf in ('64', '48', '32')]) / 60
max_mem = max([patch[rf][:, 1].max() for rf in ('64', '48', '32')]) / 1024
plt.figure()
for rf in ('64', '48', '32'):
size = int(rf) * 2
plt.errorbar(patch[rf].mean(0)[0], patch[rf].mean(0)[1],
xerr=patch[rf].std(0)[0], yerr=patch[rf].std(0)[1],
ls='None', capsize=5, capthick=2,
label=(('w/ patches ' + ' ' * (size < 100) + '{0}x{0}'.format(size))))
plt.errorbar(nopatch[0].mean(), nopatch[1].mean(),
xerr=nopatch[0].std(), yerr=nopatch[1].std(),
ls='None', capsize=5, capthick=2, label='w/o patches')
plt.legend()
plt.xticks(60 * np.arange(0, max_time), range(int(max_time + 1)))
plt.yticks(1024 * np.arange(0, max_mem, 5), range(0, int(max_mem + 1), 5))
plt.xlabel('Time [min]')
plt.ylabel('Peak memory [GB]')
plt.tight_layout(0)
plt.show()
| gpl-2.0 |
DonBeo/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
yyjiang/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
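        # 10 images x 50 patches per image = 500 patches per partial_fit call,
        # matching the chunk size described in the module docstring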
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
ablifedev/ABLIRC | ABLIRC/bin/Dataclean/stat_uniq_tag.py | 1 | 10331 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#
#
#####################################################################################
"""
Program description:
1. Read the annovar txt file
2. Collect the statistics by keyword
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import numpy
import HTSeq
import multiprocessing
import pysam
import profile
from matplotlib import pyplot
from ablib.utils.tools import *
from ablib.utils.distribution import *
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
p.add_option(
'-f', '--file', dest='file', action='store',
        type='string', help='annotation file for the statistics')
p.add_option(
'-o', '--outfile', dest='outfile', action='store',
type='string', help='insection_distribution.txt')
# p.add_option(
# '-n', '--samplename', dest='samplename', default='', action='store',
# type='string', help='sample name,default is ""')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option(
'-O', '--outDir', dest='outDir', default='./', action='store',
type='string', help='output directory', metavar="DIR")
group.add_option(
'-L', '--logDir', dest='logDir', default='', action='store',
type='string', help='log dir ,default is same as outDir')
group.add_option(
'-P', '--logPrefix', dest='logPrefix', default='', action='store',
type='string', help='log file prefix')
group.add_option(
'-E', '--email', dest='email', default='none', action='store',
type='string',
help='email address, if you want get a email when this job is finished,default is no email',
metavar="EMAIL")
group.add_option(
'-Q', '--quiet', dest='quiet', default=False, action='store_true',
help='do not print messages to stdout')
group.add_option(
'-K', '--keepTemp', dest='keepTemp', default=False, action='store_true',
help='keep temp dir')
group.add_option(
'-T', '--test', dest='isTest', default=False, action='store_true',
help='run this program for test')
p.add_option_group(group)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
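# e.g. listToString(['python', 'run.py', '-f', 'a.txt']) -> 'python run.py -f a.txt '
# (note the trailing space left by the simple concatenation above)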
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
# sample = ""
# if opt.samplename != "":
# sample = opt.samplename
# if opt.outfile == 'distance2tss_peaks.txt':
# opt.outfile = sample + '_distance2tss_peaks.txt'
#
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/'  # absolute temp path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M',
filename=logFilename,
filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s',
datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
def get_Data(infile,outfile):
Reads = {}
Uniq_tag = 0
Total = 0
for eachLine in open(infile):
if eachLine.startswith("@"):
            if eachLine in Reads:
                Reads[eachLine] += 1
else:
Reads[eachLine] = 1
# try:
# Reads[eachLine] = int(Reads[eachLine]) + 1
# except:
# Reads[eachLine] = 1
for key in Reads.keys():
if Reads[key] == 1:
Uniq_tag += 1
Total += 1
else:
Total = Total + int(Reads[key])
Uniq_Percent = '{:.2f}'.format(int(Uniq_tag)/float(Total))
with open(outfile,'w') as OUT:
OUT.writelines(os.path.basename(infile) + "\t" + str(Total) + "\t" + str(Uniq_tag) + "\t" + str(Uniq_Percent) + "\n")
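# Usage sketch (added; the script expects an input where tag/read ID lines
# start with "@", e.g. a FASTQ-like tag file):
#     python stat_uniq_tag.py -f sample_tags.fq -o sample_uniq_tag.stat
# The output is a single tab-separated line:
#     <input file name> <total tags> <unique tags> <unique fraction>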
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
get_Data(opt.file,opt.outfile)
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (
runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
| mit |
cbmoore/statsmodels | statsmodels/datasets/grunfeld/data.py | 24 | 2794 | """Grunfeld (1950) Investment Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """This is the Grunfeld (1950) Investment Data.
The source for the data was the original 11-firm data set from Grunfeld's Ph.D.
thesis recreated by Kleiber and Zeileis (2008) "The Grunfeld Data at 50".
The data can be found here.
http://statmath.wu-wien.ac.at/~zeileis/grunfeld/
For a note on the many versions of the Grunfeld data circulating see:
http://www.stanford.edu/~clint/bench/grunfeld.htm
"""
DESCRSHORT = """Grunfeld (1950) Investment Data for 11 U.S. Firms."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of observations - 220 (20 years for 11 firms)
Number of variables - 5
    Variable name definitions::
invest - Gross investment in 1947 dollars
value - Market value as of Dec. 31 in 1947 dollars
capital - Stock of plant and equipment in 1947 dollars
firm - General Motors, US Steel, General Electric, Chrysler,
Atlantic Refining, IBM, Union Oil, Westinghouse, Goodyear,
Diamond Match, American Steel
year - 1935 - 1954
Note that raw_data has firm expanded to dummy variables, since it is a
string categorical variable.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray(data, endog_idx=0, stack=False)
ds.raw_data = raw_data
return ds
def load_pandas():
"""
Loads the Grunfeld data and returns a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
raw_data has the firm variable expanded to dummy variables for each
firm (ie., there is no reference dummy)
"""
from pandas import DataFrame
from statsmodels.tools import categorical
data = _get_data()
raw_data = categorical(data, col='firm', drop=True)
ds = du.process_recarray_pandas(data, endog_idx=0)
ds.raw_data = DataFrame(raw_data)
return ds
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/grunfeld.csv','rb'), delimiter=",",
names=True, dtype="f8,f8,f8,a17,f8")
return data
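# Illustrative usage sketch (added, not part of the original module); guarded
# so it never runs on import, and it assumes pandas and the bundled
# grunfeld.csv are available.
if __name__ == "__main__":
    dataset = load_pandas()
    # 'invest' is the endogenous variable; raw_data replaces the string
    # 'firm' column with one dummy column per firm
    print(dataset.endog.head())
    print(dataset.raw_data.columns.tolist())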
| bsd-3-clause |
zero-rp/miniblink49 | v8_7_5/tools/ignition/bytecode_dispatches_report.py | 14 | 9248 | #! /usr/bin/python
#
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import heapq
import json
from matplotlib import colors
from matplotlib import pyplot
import numpy
import struct
import sys
__DESCRIPTION = """
Process v8.ignition_dispatches_counters.json and list top counters,
or plot a dispatch heatmap.
Please note that those handlers that may not or will never dispatch
(e.g. Return or Throw) do not show up in the results.
"""
__HELP_EPILOGUE = """
examples:
# Print the hottest bytecodes in descending order, reading from
# default filename v8.ignition_dispatches_counters.json (default mode)
$ tools/ignition/bytecode_dispatches_report.py
# Print the hottest 15 bytecode dispatch pairs reading from data.json
$ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json
# Save heatmap to default filename v8.ignition_dispatches_counters.svg
$ tools/ignition/bytecode_dispatches_report.py -p
# Save heatmap to filename data.svg
$ tools/ignition/bytecode_dispatches_report.py -p -o data.svg
# Open the heatmap in an interactive viewer
$ tools/ignition/bytecode_dispatches_report.py -p -i
# Display the top 5 sources and destinations of dispatches to/from LdaZero
$ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5
"""
__COUNTER_BITS = struct.calcsize("P") * 8 # Size in bits of a pointer
__COUNTER_MAX = 2**__COUNTER_BITS - 1
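# Note (added): the counters JSON is a nested mapping of the form
#   { "<from bytecode>": { "<to bytecode>": <dispatch count>, ... }, ... }
# A counter equal to __COUNTER_MAX (2**64 - 1 on 64-bit builds) has most
# likely saturated rather than being a genuine count.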
def warn_if_counter_may_have_saturated(dispatches_table):
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
if counter == __COUNTER_MAX:
print("WARNING: {} -> {} may have saturated.".format(source,
destination))
def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
def flattened_counters_generator():
for source, counters_from_source in iteritems(dispatches_table):
for destination, counter in iteritems(counters_from_source):
yield source, destination, counter
return heapq.nlargest(top_count, flattened_counters_generator(),
key=lambda x: x[2])
def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
top_bytecode_dispatch_pairs = (
find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
print("Top {} bytecode dispatch pairs:".format(top_count))
for source, destination, counter in top_bytecode_dispatch_pairs:
print("{:>12d}\t{} -> {}".format(counter, source, destination))
def find_top_bytecodes(dispatches_table):
top_bytecodes = []
for bytecode, counters_from_bytecode in iteritems(dispatches_table):
top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode))))
top_bytecodes.sort(key=lambda x: x[1], reverse=True)
return top_bytecodes
def print_top_bytecodes(dispatches_table):
top_bytecodes = find_top_bytecodes(dispatches_table)
print("Top bytecodes:")
for bytecode, counter in top_bytecodes:
print("{:>12d}\t{}".format(counter, bytecode))
def find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_source_relative):
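  # A source's ratio is (dispatches from it to `bytecode`) / (all dispatches
  # leaving that source); a destination's ratio is relative to all dispatches
  # leaving `bytecode` itself.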
sources = []
for source, destinations in iteritems(dispatches_table):
total = float(sum(itervalues(destinations)))
if bytecode in destinations:
count = destinations[bytecode]
sources.append((source, count, count / total))
destinations = []
bytecode_destinations = dispatches_table[bytecode]
bytecode_total = float(sum(itervalues(bytecode_destinations)))
for destination, count in iteritems(bytecode_destinations):
destinations.append((destination, count, count / bytecode_total))
return (heapq.nlargest(top_count, sources,
key=lambda x: x[2 if sort_source_relative else 1]),
heapq.nlargest(top_count, destinations, key=lambda x: x[1]))
def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
top_count, sort_relative):
top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
dispatches_table, bytecode, top_count, sort_relative)
print("Top sources of dispatches to {}:".format(bytecode))
for source_name, counter, ratio in top_sources:
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
print("\nTop destinations of dispatches from {}:".format(bytecode))
for destination_name, counter, ratio in top_destinations:
print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
def build_counters_matrix(dispatches_table):
labels = sorted(dispatches_table.keys())
counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int)
for from_index, from_name in enumerate(labels):
    current_row = dispatches_table[from_name]
for to_index, to_name in enumerate(labels):
counters_matrix[from_index, to_index] = current_row.get(to_name, 0)
# Reverse y axis for a nicer appearance
xlabels = labels
ylabels = list(reversed(xlabels))
counters_matrix = numpy.flipud(counters_matrix)
return counters_matrix, xlabels, ylabels
def plot_dispatches_table(dispatches_table, figure, axis):
counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table)
image = axis.pcolor(
counters_matrix,
cmap="jet",
norm=colors.LogNorm(),
edgecolor="grey",
linestyle="dotted",
linewidth=0.5
)
axis.xaxis.set(
ticks=numpy.arange(0.5, len(xlabels)),
label="From bytecode handler"
)
axis.xaxis.tick_top()
axis.set_xlim(0, len(xlabels))
axis.set_xticklabels(xlabels, rotation="vertical")
axis.yaxis.set(
ticks=numpy.arange(0.5, len(ylabels)),
label="To bytecode handler",
ticklabels=ylabels
)
axis.set_ylim(0, len(ylabels))
figure.colorbar(
image,
ax=axis,
fraction=0.01,
pad=0.01
)
def parse_command_line():
command_line_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__DESCRIPTION,
epilog=__HELP_EPILOGUE
)
command_line_parser.add_argument(
"--plot-size", "-s",
metavar="N",
default=30,
help="shorter side in inches of the output plot (default 30)"
)
command_line_parser.add_argument(
"--plot", "-p",
action="store_true",
help="plot dispatch pairs heatmap"
)
command_line_parser.add_argument(
"--interactive", "-i",
action="store_true",
help="open the heatmap in an interactive viewer, instead of writing to file"
)
command_line_parser.add_argument(
"--top-bytecode-dispatch-pairs", "-t",
action="store_true",
help="print the top bytecode dispatch pairs"
)
command_line_parser.add_argument(
"--top-entries-count", "-n",
metavar="N",
type=int,
default=10,
help="print N top entries when running with -t or -f (default 10)"
)
command_line_parser.add_argument(
"--top-dispatches-for-bytecode", "-f",
metavar="<bytecode name>",
help="print top dispatch sources and destinations to the specified bytecode"
)
command_line_parser.add_argument(
"--output-filename", "-o",
metavar="<output filename>",
default="v8.ignition_dispatches_table.svg",
help=("file to save the plot file to. File type is deduced from the "
"extension. PDF, SVG, PNG supported")
)
command_line_parser.add_argument(
"--sort-sources-relative", "-r",
action="store_true",
help=("print top sources in order to how often they dispatch to the "
"specified bytecode, only applied when using -f")
)
command_line_parser.add_argument(
"input_filename",
metavar="<input filename>",
default="v8.ignition_dispatches_table.json",
nargs='?',
help="Ignition counters JSON file"
)
return command_line_parser.parse_args()
def itervalues(d):
return d.values() if sys.version_info[0] > 2 else d.itervalues()
def iteritems(d):
return d.items() if sys.version_info[0] > 2 else d.iteritems()
def main():
program_options = parse_command_line()
with open(program_options.input_filename) as stream:
dispatches_table = json.load(stream)
warn_if_counter_may_have_saturated(dispatches_table)
if program_options.plot:
figure, axis = pyplot.subplots()
plot_dispatches_table(dispatches_table, figure, axis)
if program_options.interactive:
pyplot.show()
else:
figure.set_size_inches(program_options.plot_size,
program_options.plot_size)
pyplot.savefig(program_options.output_filename)
elif program_options.top_bytecode_dispatch_pairs:
print_top_bytecode_dispatch_pairs(
dispatches_table, program_options.top_entries_count)
elif program_options.top_dispatches_for_bytecode:
print_top_dispatch_sources_and_destinations(
dispatches_table, program_options.top_dispatches_for_bytecode,
program_options.top_entries_count, program_options.sort_sources_relative)
else:
print_top_bytecodes(dispatches_table)
if __name__ == "__main__":
main()
| apache-2.0 |
Aasmi/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
tejasnikumbh/AllSATParallel | lib/python2.7/site-packages/numpy/linalg/linalg.py | 35 | 67345 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
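# Added note: a minimal sketch of the two-triangular-solve pattern described
# in the Notes of the cholesky docstring, using SciPy for the triangular
# solves (assumption: SciPy is available; illustrative only, not part of NumPy):
#     L = cholesky(A)
#     y = scipy.linalg.solve_triangular(L, b, lower=True)
#     x = scipy.linalg.solve_triangular(L.conj().T, y, lower=False)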
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t:
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs.
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
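A quick sketch of the left/right distinction noted above: for a real
matrix, the right eigenvectors of ``a.T`` can serve as left eigenvectors
of `a` (pairing them index-by-index):
>>> a = np.array([[1., 2.], [0., 3.]])
>>> w, vl = LA.eig(a.T)
>>> np.allclose(np.dot(vl[:, 0], a), w[0] * vl[:, 0])
True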
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
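A rough check of the eigenvalue relation mentioned in the Notes: the
squared singular values should match the eigenvalues of
``a.conj().T`` times ``a`` (up to ordering):
>>> evals = np.linalg.eigvalsh(np.dot(a.conj().T, a))
>>> np.allclose(np.sort(s**2), np.sort(evals))
True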
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
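As a rough sanity check of the definition given in the Notes, the 2-norm
condition number should equal the product of the two norms for this
invertible `a`:
>>> np.allclose(LA.cond(a), LA.norm(a, 2) * LA.norm(LA.inv(a), 2))
True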
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
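A hypothetical illustration (values chosen only for intuition) of passing
an explicit `tol`, as discussed in the Notes, to treat singular values
below a known noise level as zero:
>>> A = np.diag([1., 1e-5, 0.])
>>> matrix_rank(A)            # default eps-based tolerance keeps the 1e-5 value
2
>>> matrix_rank(A, tol=1e-3)  # an absolute, noise-based tolerance drops it
1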
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
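A minimal sketch of the SVD-based construction described in the Notes
(no small-singular-value cutoff is applied here, unlike `pinv` itself):
>>> u, s, vt = np.linalg.svd(a, full_matrices=False)
>>> B2 = np.dot(vt.T, np.dot(np.diag(1. / s), u.T))
>>> np.allclose(B, B2)
True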
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over-determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
| mit |
ryanfobel/dmf_control_board | dmf_control_board_firmware/calibrate/impedance_benchmarks.py | 3 | 9822 | # coding: utf-8
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.colors import Colormap
from matplotlib.gridspec import GridSpec
import numpy as np
pd.set_option('display.width', 300)
def plot_capacitance_vs_frequency(df, **kwargs):
cleaned_df = df.dropna().copy()
fb_resistor_df = cleaned_df.set_index(cleaned_df.fb_resistor)
axis = kwargs.pop('axis', None)
s = kwargs.pop('s', 50)
facecolor = kwargs.pop('facecolor', 'none')
if axis is None:
fig = plt.figure()
axis = fig.add_subplot(111)
stats = fb_resistor_df[['frequency', 'C']].describe()
axis.set_xlim(0.8 * stats.frequency['min'], 1.2 * stats.frequency['max'])
axis.set_ylim(0.8 * stats.C['min'], 1.2 * stats.C['max'])
frequencies = fb_resistor_df.frequency.unique()
# Plot nominal test capacitance lines.
for C in fb_resistor_df.test_capacitor.unique():
axis.plot(frequencies, [C] * len(frequencies), '--', alpha=0.7,
color='0.5', linewidth=1)
# Plot scatter of _measured_ capacitance vs. frequency.
for k, v in fb_resistor_df[['frequency', 'C']].groupby(level=0):
try:
color = axis._get_lines.color_cycle.next()
except AttributeError: # make compatible with matplotlib v1.5
color = axis._get_lines.prop_cycler.next()['color']
v.plot(kind='scatter', x='frequency', y='C', loglog=True,
label='R$_{fb,%d}$' % k, ax=axis, color=color,
s=s, facecolor=facecolor, **kwargs)
axis.legend(loc='upper right')
axis.set_xlabel('Frequency (Hz)')
axis.set_ylabel('C$_{device}$ (F)')
axis.set_title('C$_{device}$')
plt.tight_layout()
return axis
def estimate_relative_error_in_nominal_capacitance(df):
# Calculate the relative percentage difference in the mean capacitance
# values measured relative to the nominal values.
cleaned_df = df.dropna().copy()
C_relative_error = (cleaned_df.groupby('test_capacitor')
.apply(lambda x: ((x['C'] - x['test_capacitor']) /
x['test_capacitor']).describe()))
pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
print ('Estimated relative error in nominal capacitance values = %.1f%% '
' +/-%.1f%%' % (C_relative_error['mean'].mean() * 100,
C_relative_error['mean'].std() * 100))
print C_relative_error[['mean', 'std']] * 100
print
return C_relative_error
def plot_impedance_vs_frequency(data):
test_loads = data['test_loads']
frequencies = data['frequencies']
C = data['C']
fb_resistor = data['fb_resistor']
calibration = data['calibration']
# create a masked array version of the capacitance matrix
C = np.ma.masked_invalid(C)
# create frequency matrix to match shape of C
f = np.tile(np.reshape(frequencies,
[len(frequencies)] + [1]*(len(C.shape) - 1)),
[1] + list(C.shape[1:]))
# Plot the impedance of each experiment vs frequency (with the data points
# color-coded according to the feedback resistor).
# Note that impedance, $Z$, can be computed as:
#
# 1
# Z = ──────────
# 2⋅π⋅freq⋅C
#
plt.figure(figsize=figsize)
legend = []
for i in range(len(calibration.R_fb)):
legend.append("R$_{fb,%d}$" % i)
ind = mlab.find(fb_resistor == i)
plt.loglog(f.flatten()[ind], 1.0 / (2 * np.pi * f.flatten()[ind] *
C.flatten()[ind]), 'o')
plt.xlim(0.8 * np.min(frequencies), 1.2 * np.max(frequencies))
for C_device in test_loads:
# TODO: What is the reason for the `np.ones` below?
plt.plot(frequencies, 1.0 / (2 * np.pi * C_device *
np.ones(len(frequencies)) * frequencies),
'--', color='0.5')
plt.legend(legend)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Z$_{device}$ ($\Omega$)')
plt.title('Z$_{device}$')
plt.tight_layout()
def calculate_stats(df, groupby='test_capacitor'):
cleaned_df = df.dropna().copy()
stats = cleaned_df.groupby(groupby)['C'].agg(['mean', 'std', 'median'])
stats['bias %'] = (cleaned_df.groupby(groupby)
.apply(lambda x: ((x['C'] - x['test_capacitor'])).mean()
/ x['C'].mean())) * 100
stats['RMSE %'] = 100 * (cleaned_df.groupby(groupby)
.apply(lambda x: np.sqrt(((x['C'] -
x['test_capacitor']) **
2).mean()) /
x['C'].mean()))
stats['cv %'] = stats['std'] / stats['mean'] * 100
return stats
def print_detailed_stats_by_condition(data, stats):
test_loads = data['test_loads']
frequencies = data['frequencies']
mean = stats['mean']
CV = stats['CV']
bias = stats['bias']
RMSE = stats['RMSE']
# print the RMSE, CV, and bias for each test capacitor and frequency combination
for i, (channel, C_device) in enumerate(test_loads):
print "\n%.2f pF" % (C_device*1e12)
for j in range(len(frequencies)):
print "%.1fkHz: mean(C)=%.2f pF, RMSE=%.1f%%, CV=%.1f%%, bias=%.1f%%" % (frequencies[j]/1e3,
1e12*mean[j,i],
RMSE[j,i],
CV[j,i],
bias[j,i])
print
def plot_measured_vs_nominal_capacitance_for_each_frequency(data, stats):
# plot the measured vs nominal capacitance for each frequency
frequencies = data['frequencies']
test_loads = data['test_loads']
mean_C = stats['mean']
std_C = stats['std']
for i in range(len(frequencies)):
plt.figure()
plt.title('(frequency=%.2fkHz)' % (frequencies[i]/1e3))
for j, (channel, C_device) in enumerate(test_loads):
plt.errorbar(C_device, mean_C[i,j],
std_C[i,j], fmt='k')
C_device = np.array([x for channel, x in test_loads])
plt.loglog(C_device, C_device, 'k:')
plt.xlim(min(C_device)*.9, max(C_device)*1.1)
plt.ylim(min(C_device)*.9, max(C_device)*1.1)
plt.xlabel('C$_{nom}$ (F)')
plt.ylabel('C$_{measured}$ (F)')
def plot_colormap(stats, column, axis=None, fig=None):
freq_vs_C_rmse = stats.reindex_axis(
pd.Index([(i, j) for i in stats.index.levels[0]
for j in stats.index.levels[1]],
name=['test_capacitor',
'frequency'])).reset_index().pivot(index='frequency',
columns=
'test_capacitor',
values=column)
if axis is None:
fig = plt.figure()
axis = fig.add_subplot(111)
frequencies = stats.index.levels[1]
axis.set_xlabel('Capacitance')
axis.set_ylabel('Frequency')
vmin = freq_vs_C_rmse.fillna(0).values.min()
vmax = freq_vs_C_rmse.fillna(0).values.max()
if vmin < 0:
vmax = np.abs([vmin, vmax]).max()
vmin = -vmax
cmap=plt.cm.coolwarm
else:
vmin = 0
cmap=plt.cm.Reds
mesh = axis.pcolormesh(freq_vs_C_rmse.fillna(0).values, vmin=vmin,
vmax=vmax, cmap=cmap)
if fig is not None:
fig.colorbar(mesh)
else:
plt.colorbar()
axis.set_xticks(np.arange(freq_vs_C_rmse.shape[1]) + 0.5)
axis.set_xticklabels(["%.1fpF" % (c*1e12)
for c in freq_vs_C_rmse.columns],
rotation=90)
axis.set_yticks(np.arange(len(frequencies)) + 0.5)
axis.set_yticklabels(["%.2fkHz" % (f / 1e3) for f in frequencies])
axis.set_xlim(0, freq_vs_C_rmse.shape[1])
axis.set_ylim(0, freq_vs_C_rmse.shape[0])
return axis
def plot_stat_summary(df, fig=None):
'''
Plot stats grouped by test capacitor load _and_ frequency.
In other words, we calculate the mean of all samples in the data
frame for each test capacitance and frequency pairing, plotting
the following stats:
- Root mean squared error
- Coefficient of variation
- Bias
## [Coefficient of variation][1] ##
> In probability theory and statistics, the coefficient of
> variation (CV) is a normalized measure of dispersion of a
> probability distribution or frequency distribution. It is defined
> as the ratio of the standard deviation to the mean.
[1]: http://en.wikipedia.org/wiki/Coefficient_of_variation
'''
if fig is None:
fig = plt.figure(figsize=(8, 8))
# Define a subplot layout, 3 rows, 2 columns
grid = GridSpec(3, 2)
stats = calculate_stats(df, groupby=['test_capacitor',
'frequency']).dropna()
for i, stat in enumerate(['RMSE %', 'cv %', 'bias %']):
axis = fig.add_subplot(grid[i, 0])
axis.set_title(stat)
# Plot a colormap to show how the statistical value changes
# according to frequency/capacitance pairs.
plot_colormap(stats, stat, axis=axis, fig=fig)
axis = fig.add_subplot(grid[i, 1])
axis.set_title(stat)
# Plot a histogram to show the distribution of statistical
# values across all frequency/capacitance pairs.
try:
axis.hist(stats[stat].values, bins=50)
except AttributeError:
print stats[stat].describe()
fig.tight_layout()
| gpl-3.0 |
Connor-R/nba_shot_charts | charting/TODO_custom_charts.py | 1 | 26868 | # CURRENT VERSION AS temp_helper.py
import requests
import urllib
import os
import shutil
import csv
import sys
import glob
import math
import pandas as pd
import numpy as np
import argparse
import matplotlib as mpb
import matplotlib.pyplot as plt
from matplotlib import offsetbox as osb
from matplotlib.patches import RegularPolygon
from datetime import date, datetime, timedelta
from time import time
from py_data_getter import data_getter
from py_db import db
db = db('nba_shots')
# setting the color map we want to use
mymap = mpb.cm.YlOrRd
whitelist_pngs = ['charts_description.png', 'nba_logo.png', '0.png', 'chart_icon.png']
base_path = os.getcwd()+"/shot_charts_custom_charts/"
def initiate(_type, _names, season_type, start_date, end_date, custom_title=None, custom_text=None, custom_img=None, custom_file=None):
start_time = time()
print '\n\ncharting.....'
if _type == 'Player':
print '\n\tplayers:\t',
else:
print '\n\tteams:\t\t',
for n in _names:
print n.replace(' ',''),
print '\n\tseason type:\t' + str(season_type)
print '\tstart date: \t' + str(start_date)
print '\tend date: \t' + str(end_date)
if custom_title is not None:
print '\ttitle: \t\t' + str(custom_title)
if custom_text is not None:
print '\ttext: \t\t' + str(custom_text)
if custom_file is None:
path_add = str(date.today())+'_'+str(datetime.now().hour)+'_'+str(datetime.now().minute)+'_'+str(datetime.now().second)+'.png'
else:
path_add = str(custom_file).replace(' ', '').replace(',','-') + '.png'
print '\tfilename: \t' + str(custom_file).replace(' ', '_').replace(',','-') + '.png'
path = base_path + path_add
id_list = []
print '\n\t\tgetting ids.....'
if _type.lower() == 'player':
for _name in _names:
idq = """SELECT player_id FROM players WHERE CONCAT(fname, " ", lname) = '%s'""" % (_name)
_id = int(db.query(idq)[0][0])
id_list.append(_id)
elif _type.lower() == 'team':
for _name in _names:
idq = """SELECT team_id FROM teams WHERE CONCAT(city, " ", tname) = '%s'""" % (_name)
_id = int(db.query(idq)[0][0])
id_list.append(_id)
print '\t\t\tDONE'
ids = tuple(id_list)
if len(ids) == 1:
ids = '('+ str(ids[0]) + ')'
# path_ids = zip(_names, id_list)
# print len(path_ids)
# raw_input(path_ids)
if custom_title is None:
custom_title = ''
for n in _names[:-1]:
custom_title += n + ', '
custom_title += _names[-1]
start2, end2 = get_dates(_type, ids, start_date, end_date, season_type)
print '\t\tacquiring shooting data.....'
shot_df = acquire_shootingData(ids, start_date, end_date, _type, season_type)
print '\t\t\tDONE'
if shot_df is not None and len(shot_df.index) != 0:
shooting_plot(path, _type, shot_df, ids, _names, season_type, start_date, end_date, custom_title, custom_text, custom_img, start2, end2)
end_time = time()
elapsed_time = float(end_time - start_time)
print "time elapsed (in seconds): \t" + str(elapsed_time)
print "time elapsed (in minutes): \t" + str(elapsed_time/60.0)
print "\n\n =================================================================================="
def acquire_shootingData(ids, start_date, end_date, _type='Player', season_type='Reg'):
shot_query = """SELECT
season_id, game_id,
team_id, game_date,
event_type, shot_type,
shot_zone_basic, shot_zone_area, LOC_X, LOC_Y,
IF(event_type='Made Shot', 1, 0) AS SHOT_MADE_FLAG,
zone_pct_plus,
efg_plus
FROM shots
JOIN shots_%s_Relative_Year USING (season_id, %s_id, season_type, shot_zone_basic, shot_zone_area)
WHERE %s_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND season_type = '%s';
"""
shot_q = shot_query % (_type, _type, _type, ids, start_date, end_date, season_type)
# raw_input(shot_q)
shots = db.query(shot_q)
shot_data = {'season_id':[], 'game_id':[], 'team_id':[], 'game_date':[], 'event_type':[], 'shot_type':[], 'shot_zone_basic':[], 'shot_zone_area':[], 'LOC_X':[], 'LOC_Y':[], 'SHOT_MADE_FLAG':[], 'zone_pct_plus':[], 'efg_plus':[]}
for row in shots:
season_id, game_id, team_id, game_date, event_type, shot_type, shot_zone_basic, shot_zone_area, LOC_X, LOC_Y, SHOT_MADE_FLAG, zone_pct_plus, efg_plus = row
shot_data['season_id'].append(season_id)
shot_data['game_id'].append(game_id)
shot_data['team_id'].append(team_id)
shot_data['game_date'].append(game_date)
shot_data['event_type'].append(event_type)
shot_data['shot_type'].append(shot_type)
shot_data['shot_zone_basic'].append(shot_zone_basic)
shot_data['shot_zone_area'].append(shot_zone_area)
shot_data['LOC_X'].append(LOC_X)
shot_data['LOC_Y'].append(LOC_Y)
shot_data['SHOT_MADE_FLAG'].append(SHOT_MADE_FLAG)
shot_data['zone_pct_plus'].append(zone_pct_plus)
shot_data['efg_plus'].append(efg_plus)
shot_df = pd.DataFrame(shot_data, columns=shot_data.keys())
return shot_df
def shooting_plot(path, _type, shot_df, ids, _names, season_type, start_date, end_date, custom_title, custom_text, custom_img, start2, end2, plot_size=(12,12), gridNum=30):
print '\t\tgetting shooting percentages in each zone.....'
(ShootingPctLocs, shotNumber), shot_count_all = find_shootingPcts(shot_df, gridNum)
print '\t\t\tDONE'
print '\t\tcalculating metrics.....'
metrics = calculate_metrics(_type, ids, start_date, end_date, season_type)
print '\t\t\tDONE'
all_efg_plus = float(get_metrics(metrics, 'all', 'efg_plus'))
paa = float(get_metrics(metrics, 'all', 'paa'))
color_efg = max(min(((all_efg_plus/100)-0.5),1.0),0.0)
fig = plt.figure(figsize=(12,12))
cmap = mymap
ax = plt.axes([0.05, 0.15, 0.81, 0.775])
ax.set_axis_bgcolor('#0C232E')
draw_court(outer_lines=False)
plt.xlim(-250,250)
plt.ylim(370, -30)
print '\t\tgetting icon.....'
img = acquire_custom_pic(custom_img)
ax.add_artist(img)
print '\t\t\tDONE'
max_radius_perc = 1.0
max_rad_multiplier = 100.0/max_radius_perc
area_multiplier = (3./4.)
lg_efg = float(get_lg_metrics(start_date, end_date, season_type, 'all', 'efg'))
print '\t\tplotting each hex bin.....'
# i is the bin#, and shots is the shooting% for that bin
for i, shots in enumerate(ShootingPctLocs):
x,y = shotNumber.get_offsets()[i]
# we check the distance from the hoop the bin is. If it in 3pt territory, we add a multiplier of 1.5 to the shooting% to properly encapsulate eFG%
dist = math.sqrt(x**2 + y**2)
mult = 1.0
if abs(x) >= 220:
mult = 1.5
elif dist/10 >= 23.75:
mult = 1.5
else:
mult = 1.0
# Setting the eFG% for a bin, making sure it's never over 1 (our maximum color value)
color_pct = ((shots*mult)/lg_efg)-0.5
bin_pct = max(min(color_pct, 1.0), 0.0)
hexes = RegularPolygon(
shotNumber.get_offsets()[i], #x/y coords
numVertices=6,
radius=(295/gridNum)*((max_rad_multiplier*((shotNumber.get_array()[i]))/shot_count_all)**(area_multiplier)),
color=cmap(bin_pct),
alpha=0.95,
fill=True)
# setting a maximum radius for our bins at 295 (personal preference)
if hexes.radius > 295/gridNum:
hexes.radius = 295/gridNum
ax.add_patch(hexes)
print '\t\t\tDONE'
print '\t\tcreating the frequency legend.....'
# we want to have 4 ticks in this legend so we iterate through 4 items
for i in range(0,4):
base_rad = max_radius_perc/4
# the x,y coords for our patch (the first coordinate is (-205,415), and then we move up and left for each addition coordinate)
patch_x = -205-(10*i)
patch_y = 365-(14*i)
# specifying the size of our hexagon in the frequency legend
patch_rad = (299.9/gridNum)*((base_rad+(base_rad*i))**(area_multiplier))
patch_perc = base_rad+(i*base_rad)
# the x,y coords for our text
text_x = patch_x + patch_rad + 2
text_y = patch_y
patch_axes = (patch_x, patch_y)
# the text will be slightly different for our maximum sized hexagon,
if i < 3:
text_text = ' %s%% of Attempted Shots' % ('%.2f' % patch_perc)
else:
text_text = '$\geq$%s%% of Attempted Shots' %(str(patch_perc))
# draw the hexagon. the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
patch = RegularPolygon(patch_axes, numVertices=6, radius=patch_rad, color=cmap(color_efg), alpha=0.95, fill=True)
ax.add_patch(patch)
# add the text for the hexagon
ax.text(text_x, text_y, text_text, fontsize=12, horizontalalignment='left', verticalalignment='center', family='DejaVu Sans', color='white', fontweight='bold')
print '\t\t\tDONE'
# Add a title to our frequency legend (the x/y coords are hardcoded).
# Again, the color=map(eff_fg_all_float/100) makes the hexagons in the legend the same color as the player's overall eFG%
ax.text(-235, 310, 'Zone Frequencies', fontsize = 15, horizontalalignment='left', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
print '\t\tadding text.....'
# Add a title to our chart (just the player's name)
chart_title = "%s" % (custom_title)
ax.text(31.25,-40, chart_title, fontsize=29, horizontalalignment='center', verticalalignment='bottom', family='DejaVu Sans', color=cmap(color_efg), fontweight='bold')
# Add user text
ax.text(-250,-31,'CHARTS BY @NBAChartBot',
fontsize=10, horizontalalignment='left', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
# Add data source text
ax.text(31.25,-31,'DATA FROM STATS.NBA.COM',
fontsize=10, horizontalalignment='center', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
# Add date text
_date = date.today()
ax.text(250,-31,'AS OF %s' % (str(_date)),
fontsize=10, horizontalalignment='right', verticalalignment = 'bottom', family='DejaVu Sans', color='white', fontweight='bold')
key_text = get_key_text(_type, ids, start_date, end_date, metrics)
# adding breakdown of eFG% by shot zone at the bottom of the chart
ax.text(307,380, key_text, fontsize=12, horizontalalignment='right', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.5)
if _type == 'Player':
teams_text, team_len = get_teams_text(ids, start_date, end_date, custom_text, season_type)
elif _type == 'Team':
team_len = len(_names)
if custom_text is None:
teams_text = ''
if len(_names) == 1:
teams_text = str(_names[0])
else:
i = 0
for team in _names[0:-1]:
if i%2 == 0 and i > 0:
teams_text += '\n'
text_add = '%s, ' % str(team)
teams_text += text_add
i += 1
if i%2 == 0:
teams_text += '\n'
teams_text += str(_names[-1])
else:
teams_text = custom_text
if custom_text is None:
if season_type == 'Reg':
season_type_text = 'Regular Season Shots:\n'
elif season_type == 'AS':
season_type_text = 'All Star Shots:\n'
elif season_type == 'Pre':
season_type_text = 'Pre Season Shots:\n'
elif season_type == 'Post':
season_type_text = 'Post Season Shots:\n'
else:
season_type_text = 'All Shots:\n'
else:
season_type_text = ''
if team_len > 6:
ax.text(-250,380, str(start2) + ' to ' + str(end2) + '\n'+ season_type_text + teams_text,
fontsize=8, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.4)
else:
ax.text(-250,380,str(start2) + ' to ' + str(end2) + '\n'+ season_type_text + teams_text,
fontsize=11, horizontalalignment='left', verticalalignment = 'top', family='DejaVu Sans', color='white', linespacing=1.5)
print '\t\t\tDONE'
# adding a color bar for reference
ax2 = fig.add_axes([0.875, 0.15, 0.04, 0.775])
cb = mpb.colorbar.ColorbarBase(ax2,cmap=cmap, orientation='vertical')
cbytick_obj = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cbytick_obj, color='white', fontweight='bold')
cb.set_label('EFG+ (100 is League Average)', family='DejaVu Sans', color='white', fontweight='bold', labelpad=-4, fontsize=14)
cb.set_ticks([0.0, 0.25, 0.5, 0.75, 1.0])
cb.set_ticklabels(['$\mathbf{\leq}$50','75', '100','125', '$\mathbf{\geq}$150'])
print 'ALL DONE\n\n'
figtit = path
plt.savefig(figtit, facecolor='#26373F', edgecolor='black')
plt.clf()
def find_shootingPcts(shot_df, gridNum):
x = shot_df.LOC_X[shot_df['LOC_Y']<425.1]
y = shot_df.LOC_Y[shot_df['LOC_Y']<425.1]
# Grabbing the x and y coords, for all made shots
x_made = shot_df.LOC_X[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
y_made = shot_df.LOC_Y[(shot_df['SHOT_MADE_FLAG']==1) & (shot_df['LOC_Y']<425.1)]
#compute number of shots made and taken from each hexbin location
hb_shot = plt.hexbin(x, y, gridsize=gridNum, extent=(-250,250,425,-50));
plt.close()
hb_made = plt.hexbin(x_made, y_made, gridsize=gridNum, extent=(-250,250,425,-50));
plt.close()
#compute shooting percentage
ShootingPctLocs = hb_made.get_array() / hb_shot.get_array()
ShootingPctLocs[np.isnan(ShootingPctLocs)] = 0 #makes 0/0s=0
shot_count_all = len(shot_df.index)
# Returning all values
return (ShootingPctLocs, hb_shot), shot_count_all
def calculate_metrics(_type, ids, start_date, end_date, season_type):
print '\t\t\tgetting breakdown.....'
breakdown_q = """SELECT *
FROM(
SELECT shot_zone_basic,
COUNT(*) AS attempts,
SUM(CASE WHEN event_type = "Made Shot" THEN 1 ELSE 0 END) AS makes,
SUM(CASE WHEN event_type = "Made Shot" AND shot_type = '2PT Field Goal' THEN 2
WHEN event_type = "Made Shot" AND shot_type = '3PT Field Goal' THEN 3
ELSE 0 END) AS points
FROM shots
WHERE %s_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND season_type = '%s'
GROUP BY shot_zone_basic
UNION
SELECT 'all' AS shot_zone_basic,
COUNT(*) AS attempts,
SUM(CASE WHEN event_type = "Made Shot" THEN 1 ELSE 0 END) AS makes,
SUM(CASE WHEN event_type = "Made Shot" AND shot_type = '2PT Field Goal' THEN 2
WHEN event_type = "Made Shot" AND shot_type = '3PT Field Goal' THEN 3
ELSE 0 END) AS points
FROM shots
WHERE %s_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND season_type = '%s'
) a
JOIN(SELECT COUNT(DISTINCT game_id) as games
FROM shots
WHERE %s_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND season_type = '%s'
) b
"""
breakdown_qry = breakdown_q % (_type, ids, start_date, end_date, season_type, _type, ids, start_date, end_date, season_type, _type, ids, start_date, end_date, season_type)
# raw_input(breakdown_qry)
breakdown = db.query(breakdown_qry)
zone_data = []
allatts = 0
for row in breakdown:
_z, att, mak, pts, gms = row
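        # eFG% = points / (2 * attempts): since `points` counts only field-goal points,
        # dividing by 2*FGA credits made threes at 1.5x, the standard eFG% definition.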
efg = (float(pts)/float(att))/2.0
entry = {'zone':_z, 'attempts':float(att), 'makes':float(mak), 'points':float(pts), 'games':float(gms), 'efg':efg}
zone_data.append(entry)
if _z == 'all':
allatts = att
print '\t\t\tgetting all league metrics.....'
final_data = []
lgALL_zone = float(get_lg_metrics(start_date, end_date, season_type, 'all', 'zone_pct'))
lgALL_efg = float(get_lg_metrics(start_date, end_date, season_type, 'all', 'efg'))
print '\t\t\tgetting zone league metrics.....'
for entry in zone_data:
z_pct = float(entry.get('attempts'))/float(allatts)
entry['z_pct'] = z_pct
lg_zone = float(get_lg_metrics(start_date, end_date, season_type, entry.get('zone'), 'zone_pct'))
lg_efg = float(get_lg_metrics(start_date, end_date, season_type, entry.get('zone'), 'efg'))
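        # The *_plus metrics are indexed to league average (100 = league average), e.g.
        # zone_pct_plus = 100 * player_zone_share / league_zone_share; the zero checks
        # below guard against a missing league value.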
if lg_zone == 0:
entry['zone_pct_plus'] = 0
else:
entry['zone_pct_plus'] = 100*(entry.get('z_pct')/lg_zone)
if lg_efg == 0:
entry['ZONE_efg_plus'] = 0
else:
entry['ZONE_efg_plus'] = 100*(entry.get('efg')/lg_efg)
if lgALL_efg == 0:
entry['efg_plus'] = 0
else:
entry['efg_plus'] = 100*(entry.get('efg')/lgALL_efg)
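        # PAA (points above average) = attempts * (efg - league efg) * 2, i.e. the extra
        # points scored versus a league-average shooter taking the same attempts.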
zone_paa = entry.get('attempts')*(entry.get('efg')-lg_efg)*2
entry['ZONE_paa'] = zone_paa
entry['ZONE_paa_per_game'] = zone_paa/(entry.get('games'))
paa = entry.get('attempts')*(entry.get('efg')-lgALL_efg)*2
entry['paa'] = paa
entry['paa_per_game'] = paa/(entry.get('games'))
final_data.append(entry)
return final_data
def get_lg_metrics(start_date, end_date, season_type, shot_zone_basic, metric):
q = """SELECT SUM(%s*attempts)/SUM(attempts)
FROM shots_League_Distribution_Year
WHERE season_id IN
(SELECT
DISTINCT season_id
FROM shots s
WHERE game_date >= '%s'
AND game_date <= '%s')
AND shot_zone_basic = '%s'
AND shot_zone_area = 'all'
AND season_type = '%s'
"""
qry = q % (metric, start_date, end_date, shot_zone_basic, season_type)
# raw_input(qry)
lg_val = db.query(qry)[0][0]
if lg_val is None:
return 0
else:
return lg_val
def get_metrics(metrics, zone, target):
for row in metrics:
if row.get('zone').lower() == zone.lower():
return row.get(target)
return 0
def draw_court(ax=None, color='white', lw=2, outer_lines=False):
from matplotlib.patches import Circle, Rectangle, Arc
if ax is None:
ax = plt.gca()
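    # Dimensions below are in stats.nba.com shot-chart units (tenths of feet, hoop at
    # the origin), e.g. the 7.5-unit hoop radius is the 9-inch rim and 237.5 is the
    # 23.75 ft three-point arc radius.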
hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False)
backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color)
outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color,
fill=False)
inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color,
fill=False)
top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180,
linewidth=lw, color=color, fill=False)
bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color, linestyle='dashed')
restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw,
color=color)
corner_three_a = Rectangle((-220, -50.0), 0, 140, linewidth=lw,
color=color)
corner_three_b = Rectangle((219.75, -50.0), 0, 140, linewidth=lw, color=color)
three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw,
color=color)
center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0,
linewidth=lw, color=color)
center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0,
linewidth=lw, color=color)
court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw,
bottom_free_throw, restricted, corner_three_a,
corner_three_b, three_arc, center_outer_arc,
center_inner_arc]
if outer_lines:
outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw,
color=color, fill=False)
court_elements.append(outer_lines)
for element in court_elements:
ax.add_patch(element)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xticks([])
ax.set_yticks([])
return ax
def get_teams_text(ids, start_date, end_date, custom_text, season_type):
if custom_text is None:
team_q = """SELECT
DISTINCT CONCAT(city, ' ', tname)
FROM shots s
JOIN teams t USING (team_id)
WHERE Player_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND LEFT(season_id, 4) >= t.start_year
AND LEFT(season_id, 4) < t.end_year
AND season_type = '%s';
"""
team_qry = team_q % (ids, start_date, end_date, season_type)
teams = db.query(team_qry)
team_list = []
for team in teams:
team_list.append(team[0])
team_text = ""
if len(team_list) == 1:
team_text = str(team_list[0])
else:
i = 0
for team in team_list[0:-1]:
if i%2 == 0 and i > 0:
team_text += '\n'
text_add = '%s, ' % str(team)
team_text += text_add
i += 1
if i%2 == 0:
team_text += '\n'
team_text += str(team_list[-1])
return team_text, len(team_list)
else:
return custom_text, 0
def get_key_text(_type, ids, start_date, end_date, metrics):
text = ''
for zone in ('All', 'Above The Break 3', 'Corner 3', 'Mid-Range', 'In The Paint (Non-RA)', 'Restricted Area'):
if zone == 'All':
text += 'All Shots | '
elif zone == 'Above The Break 3':
text += '\n' + 'Arc 3 | '
elif zone == 'In The Paint (Non-RA)':
text += '\n' + 'Paint(Non-RA) | '
elif zone == 'Restricted Area':
text += '\n' + 'Restricted | '
else:
text += '\n' + zone + ' | '
atts = ("%.0f" % get_metrics(metrics, zone, 'attempts'))
makes = ("%.0f" % get_metrics(metrics, zone, 'makes'))
zone_pct = ("%.1f" % (float(100)*get_metrics(metrics, zone, 'z_pct')))
zone_pct_plus = ("%.1f" % get_metrics(metrics, zone, 'zone_pct_plus'))
efg = ("%.1f" % (float(100)*get_metrics(metrics, zone, 'efg')))
efg_plus = ("%.1f" % get_metrics(metrics, zone, 'efg_plus'))
zone_efg_plus = ("%.1f" % get_metrics(metrics, zone, 'ZONE_efg_plus'))
paa = ("%.1f" % get_metrics(metrics, zone, 'paa'))
paa_game = ("%.1f" % get_metrics(metrics, zone, 'paa_per_game'))
if zone == 'All':
text += str(makes) + ' for ' + str(atts) + ' | '
text += str(efg) + ' EFG% ('
text += str(efg_plus) + ' EFG+ | '
text += str(paa) + ' PAA) | '
text += str(paa_game) + ' PAA/G'
else:
text += str(makes) + '/' + str(atts) + ' | '
text += str(zone_pct) + '% z% (' + str(zone_pct_plus) + ' z%+) | '
text += str(zone_efg_plus) + ' zEFG+ ('
text += str(efg_plus) + ' EFG+ | '
text += str(paa) + ' PAA)'
return text
def get_dates(_type, ids, start_date, end_date, season_type):
q = """SELECT MIN(game_date), MAX(game_date)
FROM shots
WHERE %s_id IN %s
AND game_date >= '%s'
AND game_date <= '%s'
AND season_type = '%s';"""
qry = q % (_type, ids, start_date, end_date, season_type)
dates = db.query(qry)[0]
start_date, end_date = dates
return dates
def acquire_custom_pic(custom_img, offset=(250,370)):
from matplotlib import offsetbox as osb
import urllib
if custom_img is not None:
try:
img_path = os.getcwd()+'/'+custom_img+'.png'
player_pic = plt.imread(img_path)
except IOError:
img_path = os.getcwd()+'/chart_icon.png'
player_pic = plt.imread(img_path)
else:
img_path = os.getcwd()+'/chart_icon.png'
player_pic = plt.imread(img_path)
img = osb.OffsetImage(player_pic)
img = osb.AnnotationBbox(img, offset,xycoords='data',pad=0.0, box_alignment=(1,0), frameon=False)
return img
# def gen_charts():
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('--_type', default = 'Player')
# parser.add_argument('--_names', default = ["Paul Pierce"])
# parser.add_argument('--season_type', default = 'Post')
# parser.add_argument('--start_date', default = '1996-04-01')
# parser.add_argument('--end_date', default = '2017-10-01')
# parser.add_argument('--custom_title', default = 'Paul Pierce - Career Playoffs')
# parser.add_argument('--custom_text', default = None)
# parser.add_argument('--custom_img', default = None)
# parser.add_argument('--custom_file', default = 'PaulPierce_Playoffs')
# parser.add_argument('--_type', default = 'Player')
# parser.add_argument('--_names', default = ['John Wall', 'DeMar DeRozan', 'Jimmy Butler', 'Draymond Green', 'DeAndre Jordan'])
# parser.add_argument('--season_type', default = 'Reg')
# parser.add_argument('--start_date', default = '2016-06-14')
# parser.add_argument('--end_date', default = date.today())
# parser.add_argument('--custom_title', default = '2016-17 All NBA 3rd Team')
# parser.add_argument('--custom_text', default = 'John Wall\nDeMar DeRozan\nJimmy Butler\nDraymond Green\nDeAndre Jordan')
# parser.add_argument('--custom_img', default = None)
# parser.add_argument('--custom_file', default = 'AllNBA_3_201617')
parser.add_argument('--_type', default = 'Player')
parser.add_argument('--_names', default = ["Drew Gooden"])
parser.add_argument('--season_type', default = 'Reg')
parser.add_argument('--start_date', default = '2010-02-20')
parser.add_argument('--end_date', default = '2010-04-14')
parser.add_argument('--custom_title', default = 'Drew Gooden - 2009/10 Clippers')
parser.add_argument('--custom_text', default = None)
parser.add_argument('--custom_img', default = None)
parser.add_argument('--custom_file', default = 'Drew_Gooden_Custom')
args = parser.parse_args()
initiate(args._type, args._names, args.season_type, args.start_date, args.end_date, args.custom_title, args.custom_text, args.custom_img, args.custom_file)
| mit |
rodluger/planetplanet | scripts/contrast.py | 1 | 9790 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
contrast.py |github|
--------------------
Plots an occultation event in two different limits: the airless limit
and the thick atmosphere limit. The asymmetry of the light curve in the
former case betrays a strong day/night temperature contrast on the occulted
planet.
.. plot::
:align: center
from scripts import contrast
contrast._test()
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/contrast.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from planetplanet import Planet, Star, System, DrawEyeball, LimbDarkenedMap, \
RadiativeEquilibriumMap, UniformMap
from planetplanet.constants import *
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
import numpy as np
np.random.seed(1234)
def _test():
'''
'''
plot()
pl.show()
def plot():
'''
'''
# The figure for the paper
fig = pl.figure(figsize = (8, 14))
ax = [pl.subplot2grid((6, 1), (0, 0), colspan = 1, rowspan = 2),
pl.subplot2grid((6, 1), (2, 0), colspan = 1, rowspan = 1),
pl.subplot2grid((6, 1), (3, 0), colspan = 1, rowspan = 2),
pl.subplot2grid((6, 1), (5, 0), colspan = 1, rowspan = 1)]
fig.subplots_adjust(top = 0.8)
# Plot both an airless and a limb-darkened planet
for color, airless, label, dt, df in zip(['b', 'g'], [False, True],
['Thick atmosphere', 'Airless'],
[0, -0.00165],
[1, 1.015 * 1.1728478216578166]):
# Instantiate the star
mstar = 0.0802
rstar = 0.121
teff = (0.000524 * LSUN /
(4 * np.pi * (rstar * RSUN) ** 2 * SBOLTZ)) ** 0.25
star = Star('A', m = mstar, r = rstar, teff = teff, color = 'k')
# Instantiate `c`
RpRs = np.sqrt(0.687 / 100)
r = RpRs * rstar * RSUN / REARTH
c = Planet('c', m = 1.38, per = 2.4218233, inc = 89.67 - 0.05, r = r,
t0 = 0, Omega = 0, w = 0, ecc = 0, color = 'coral',
tnight = 40., albedo = 0.3, phasecurve = False, nz = 31)
# Instantiate `d`
RpRs = np.sqrt(0.367 / 100)
r = RpRs * rstar * RSUN / REARTH
d = Planet('d', m = 0.41, per = 4.049610, inc = 89.75 + 0.16, r = r,
t0 = 0, Omega = 0, w = 0, ecc = 0, color = 'firebrick',
tnight = 40., albedo = 0.3, phasecurve = False)
# Instantiate the system
system = System(star, c, d, distance = 12, oversample = 1)
# There's an occultation of `c` at this time
# This is in the distant past, but since this script is just an
# example, it will do just fine.
time = np.arange(-259.684 + 2 * 0.00025, -259.665, 0.01 * MINUTE)
minutes = (time - np.nanmedian(time)) / MINUTE
# System
system = System(star, c, d)
if airless:
system.c.radiancemap = RadiativeEquilibriumMap()
else:
system.c.radiancemap = LimbDarkenedMap()
system.compute(time, lambda2 = 15)
flux = np.array(c.flux[:,-1]) - np.nanmedian(c.flux[:,-1])
# Stellar baseline
norm = np.nanmedian(star.flux[:,-1])
tmid = len(time) // 2
# Plot the light curves
ax[0].plot(minutes, (norm + flux) / norm, '-',
color = color, label = label)
# Compute the shifted, normalized airless light curve
# I eyeballed the scaled depth and duration to get the best match
# to egress. We can probably do a little better, but it's still going
# to be a ~20 ppm signal in the residuals -- there's no way around the
# asymmetry!
if airless:
fairless1 = (norm + flux) / norm
fairless2 = np.interp(time, time + dt,
1 - df * (1 - (norm + flux) / norm))
fairless3 = np.interp(minutes, 1.5 * minutes, fairless2)
else:
fthickatm = (norm + flux) / norm
# Plot the residuals
ax[1].plot(minutes, 1e6 * (fairless1 - fthickatm), '-',
color = 'k', label = 'Residuals')
# Plot the shifted, normalized airless light curve and the
# thick atmosphere light curve for comparison
ax[2].plot(minutes, fthickatm, '-',
color = 'b', label = 'Thick atmosphere')
ax[2].plot(minutes, fairless3, '-', color = 'g', label = 'Airless')
# Plot the residuals
ax[3].plot(minutes, 1e6 * (fairless3 - fthickatm), '-',
color = 'k', label = 'Residuals')
# Plot the images
x0 = 0.5102
dx = 0.15
px = [x0 - 2 * dx, x0 - dx, x0, x0 + dx, x0 + 2 * dx]
for i, t in enumerate([1333 - 600, 1333 - 300, 1333,
1333 + 300, 1333 + 600]):
# This algebra is from a previous version of the paper
# Turns out it's incorrect for non-edge-on orbits, but
# since TRAPPIST-1 is so close to edge-on, I didn't bother
# to update the math here.
rp = c._r
x0 = c.x_hr[t]
y0 = c.y_hr[t]
z0 = c.z_hr[t]
x = x0 * np.cos(c._Omega) + y0 * np.sin(c._Omega)
y = y0 * np.cos(c._Omega) - x0 * np.sin(c._Omega)
z = z0
r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
xprime = c._r * np.cos(c._Phi) * np.sin(c._Lambda)
yprime = c._r * np.sin(c._Phi)
zprime = r - c._r * np.cos(c._Phi) * np.cos(c._Lambda)
rxz = np.sqrt(x ** 2 + z ** 2)
xstar = ((z * r) * xprime - (x * y) * yprime
+ (x * rxz) * zprime) / (r * rxz)
ystar = (rxz * yprime + y * zprime) / r
zstar = (-(x * r) * xprime - (y * z) * yprime
+ (z * rxz) * zprime) / (r * rxz)
xstar, ystar = xstar * np.cos(c._Omega) - ystar * np.sin(c._Omega), \
ystar * np.cos(c._Omega) + xstar * np.sin(c._Omega)
x = x0
y = y0
dist = np.sqrt((xstar - x) ** 2 + (ystar - y) ** 2)
gamma = np.arctan2(ystar - y, xstar - x) + np.pi
if (zstar - z) <= 0:
theta = np.arccos(dist / c._r)
else:
theta = -np.arccos(dist / c._r)
occ_dict = [dict(x = (d.x_hr[t] - x0) / c._r,
y = (d.y_hr[t] - y0) / c._r,
r = d._r / c._r, zorder = i + 1, alpha = 1)]
DrawEyeball(px[i], 0.85, 0.025, theta = theta, nz = 31, gamma = gamma,
draw_ellipses = False, radiancemap = c.radiancemap,
occultors = occ_dict, cmap = 'inferno',
fig = fig, rasterize = True)
# Arrows
ax[0].annotate("", xy = (minutes[1333 - 600], 1.000008),
xycoords = "data", xytext = (-80, 40),
textcoords = "offset points",
clip_on = False,
arrowprops = dict(arrowstyle = '-', alpha = 0.5, lw = 1))
ax[0].annotate("", xy = (minutes[1333 - 300], 1.000008),
xycoords = "data", xytext = (-40, 40),
textcoords = "offset points",
clip_on = False,
arrowprops = dict(arrowstyle = '-', alpha = 0.5, lw = 1))
ax[0].annotate("", xy = (minutes[1333], 1.000008),
xycoords = "data", xytext = (0, 40),
textcoords = "offset points",
clip_on = False,
arrowprops = dict(arrowstyle = '-', alpha = 0.5, lw = 1))
ax[0].annotate("", xy = (minutes[1333 + 300], 1.000008),
xycoords = "data", xytext = (40, 40),
textcoords = "offset points",
clip_on = False,
arrowprops = dict(arrowstyle = '-', alpha = 0.5, lw = 1))
ax[0].annotate("", xy = (minutes[1333 + 600], 1.000008),
xycoords = "data", xytext = (80, 40),
textcoords = "offset points",
clip_on = False,
arrowprops = dict(arrowstyle = '-', alpha = 0.5, lw = 1))
    # Appearance
ax[0].legend(loc = 'lower left', fontsize = 10, frameon = False)
for i, axis in enumerate(ax):
axis.get_yaxis().set_major_locator(MaxNLocator(4))
axis.get_xaxis().set_major_locator(MaxNLocator(8))
for tick in axis.get_xticklabels():
tick.set_fontsize(12)
for tick in axis.get_yticklabels():
tick.set_fontsize(12)
axis.ticklabel_format(useOffset = False)
if i < 3:
axis.set_xticklabels([])
ax[0].set_ylabel(r'Flux', fontweight = 'bold', fontsize = 16)
ax[1].set_ylabel(r'$\Delta$ [ppm]', fontweight = 'bold',
fontsize = 16, labelpad = 28)
ax[2].set_ylabel(r'Scaled Flux', fontweight = 'bold', fontsize = 16)
ax[3].set_ylabel(r'$\Delta$ [ppm]', fontweight = 'bold',
fontsize = 16, labelpad = 28)
ax[0].margins(None, 0.1)
ax[1].set_ylim(-45,91)
ax[2].margins(None, 0.1)
ax[3].set_ylim(-15, 15)
ax[3].set_yticks([-10,0,10])
ax[-1].set_xlabel('Time [minutes]', fontweight = 'bold',
fontsize = 16, labelpad = 15)
return fig, ax
if __name__ == '__main__':
fig, ax = plot()
fig.savefig("contrast.pdf", bbox_inches = 'tight', dpi = 600) | gpl-3.0 |
wazeerzulfikar/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 112 | 1819 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
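        # For each held-out fraction, average the test error (1 - accuracy) over
        # `rounds` random train/test splits to smooth out split-to-split variance.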
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
1kastner/analyse_weather_data | interpolation/interpolate_experimental.py | 1 | 7078 | """
Run demo with
python3 -m interpolation.interpolate_experimental
"""
import datetime
import random
import logging
import itertools
import os
import numpy
import pandas
from filter_weather_data.filters import StationRepository
from filter_weather_data import get_repository_parameters
from filter_weather_data import RepositoryParameter
from interpolation.interpolator.nearest_k_finder import NearestKFinder
from interpolation.interpolator.statistical_interpolator_experimental import get_interpolation_results
class Scorer:
def __init__(self, target_station_dict, neighbour_station_dicts, start_date, end_date):
self.target_station_dict = target_station_dict
self.nearest_k_finder = NearestKFinder(neighbour_station_dicts, start_date, end_date)
def score_all_neighbours(self, date, t_actual):
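        # Passing k = -1 here presumably asks NearestKFinder for every available
        # neighbour station; the statistical interpolators are then scored against
        # the observed temperature t_actual at this timestamp.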
relevant_neighbours = self.nearest_k_finder.find_k_nearest_neighbours(self.target_station_dict, date, -1)
return get_interpolation_results(relevant_neighbours, t_actual, "_all")
def score_interpolation_algorithm_at_date(scorer, date):
t_actual = scorer.target_station_dict["data_frame"].loc[date].temperature
results = {}
results.update(scorer.score_all_neighbours(date, t_actual))
return results
def setup_logging(interpolation_name):
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
#console_handler = logging.StreamHandler(sys.stdout)
#console_handler.setFormatter(formatter)
#log.addHandler(console_handler)
file_name = "interpolation_{date}_{interpolation_name}.log".format(
interpolation_name=interpolation_name,
date=datetime.datetime.now().isoformat().replace(":", "-").replace(".", "-")
)
path_to_file_to_log_to = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"log",
file_name
)
file_handler = logging.FileHandler(path_to_file_to_log_to)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.propagate = False
log.info("### Start new logging")
return log
def do_interpolation_scoring(
target_station_dict,
j,
target_station_dicts_len,
neighbour_station_dicts,
start_date,
end_date
):
target_station_name = target_station_dict["name"]
logging.info("interpolate for " + target_station_name)
logging.info("currently at " + str(j + 1) + " out of " + target_station_dicts_len)
logging.info("use " + " ".join([station_dict["name"] for station_dict in neighbour_station_dicts]))
scorer = Scorer(target_station_dict, neighbour_station_dicts, start_date, end_date)
scorer.nearest_k_finder.sample_up(target_station_dict, start_date, end_date)
sum_square_errors = {}
total_len = len(target_station_dict["data_frame"].index.values)
each_minute = target_station_dict["data_frame"].index.values
    grouped_by_half_day = numpy.array_split(each_minute, total_len // 720)  # 12h (720-minute) chunks
each_half_day = [numpy.random.choice(hour_group) for hour_group in grouped_by_half_day]
for current_i, date in enumerate(each_half_day):
result = score_interpolation_algorithm_at_date(scorer, date)
for method, square_error in result.items():
if method not in sum_square_errors:
sum_square_errors[method] = {}
sum_square_errors[method]["total"] = 0
sum_square_errors[method]["n"] = 0
if not numpy.isnan(square_error):
sum_square_errors[method]["total"] += square_error
sum_square_errors[method]["n"] += 1
method_and_result = list(sum_square_errors.items())
method_and_result.sort(key=lambda x: x[0])
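    # Per-method RMSE = sqrt(summed squared error / number of scored timestamps);
    # methods that never produced a valid score (n == 0) fall back to NaN.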
for method, result in method_and_result:
if sum_square_errors[method]["n"] > 0:
method_rmse = numpy.sqrt(sum_square_errors[method]["total"] / sum_square_errors[method]["n"])
else:
method_rmse = numpy.nan
sum_square_errors[method]["rmse"] = method_rmse
score_str = "%.3f" % method_rmse
logging.info(method + " " * (12 - len(method)) + score_str + " n=" + str(sum_square_errors[method]["n"]))
logging.info("end method list")
data_dict = {}
for method in sum_square_errors.keys():
data_dict[method + "--rmse"] = [sum_square_errors[method]["rmse"]]
data_dict[method + "--n"] = [sum_square_errors[method]["n"]]
data_dict[method + "--total"] = [sum_square_errors[method]["total"]]
return pandas.DataFrame(data=data_dict)
def score_algorithm(start_date, end_date, repository_parameters, limit=0, interpolation_name="NONE"):
station_repository = StationRepository(*repository_parameters)
station_dicts = station_repository.load_all_stations(start_date, end_date, limit=limit)
# separate in two sets
random.shuffle(station_dicts)
separator = int(.3 * len(station_dicts)) # 70% vs 30%
target_station_dicts, neighbour_station_dicts = station_dicts[:separator], station_dicts[separator:]
setup_logging(interpolation_name)
logging.info("General Overview")
logging.info("targets: " + " ".join([station_dict["name"] for station_dict in target_station_dicts]))
logging.info("neighbours: " + " ".join([station_dict["name"] for station_dict in neighbour_station_dicts]))
logging.info("End overview")
logging.info("Several Runs")
target_station_dicts_len = str(len(target_station_dicts))
overall_result = itertools.starmap(do_interpolation_scoring, [
[
target_station_dict,
j,
target_station_dicts_len,
neighbour_station_dicts,
start_date,
end_date
] for j, target_station_dict in enumerate(target_station_dicts)
])
logging.info("end targets")
logging.info("overall results")
overall_result_df = pandas.concat(overall_result)
column_names = overall_result_df.columns.values.tolist()
methods = set()
for column_name in column_names:
method, value = column_name.split("--")
methods.update([method])
for method in methods:
overall_total = numpy.nansum(overall_result_df[method + "--total"])
overall_n = int(numpy.nansum(overall_result_df[method + "--n"]))
overall_rmse = numpy.sqrt(overall_total / overall_n)
score_str = "%.5f" % overall_rmse
logging.info(method + " " * (12 - len(method)) + score_str + " n=" + str(overall_n))
overall_result_df.to_csv("interpolation_result_{date}_{interpolation_name}.csv".format(
date=datetime.datetime.now().isoformat().replace(":", "-").replace(".", "-"),
interpolation_name=interpolation_name
))
def demo():
start_date = "2016-01-31"
end_date = "2016-02-01"
repository_parameters = get_repository_parameters(RepositoryParameter.ONLY_OUTDOOR_AND_SHADED)
score_algorithm(start_date, end_date, repository_parameters, limit=60, interpolation_name="test")
if __name__ == "__main__":
demo()
| agpl-3.0 |
potash/scikit-learn | examples/hetero_feature_union.py | 81 | 6241 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.pipeline.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
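# Rough sketch of the core mechanism used below (the transformer names here are
# purely illustrative): FeatureUnion concatenates transformer outputs column-wise
# and can scale each block via transformer_weights, e.g.
#   union = FeatureUnion([('text', TfidfVectorizer()), ('stats', DictVectorizer())],
#                        transformer_weights={'text': 0.8, 'stats': 1.0})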
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to scikit-learn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
Adai0808/BuildingMachineLearningSystemsWithPython | ch06/04_sent.py | 22 | 10125 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import re
import nltk
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "04"
import json
poscache_filename = "poscache.json"
try:
poscache = json.load(open(poscache_filename, "r"))
except IOError:
poscache = {}
class LinguisticVectorizer(BaseEstimator):
def get_feature_names(self):
return np.array(['sent_neut', 'sent_pos', 'sent_neg',
'nouns', 'adjectives', 'verbs', 'adverbs',
'allcaps', 'exclamation', 'question'])
def fit(self, documents, y=None):
return self
def _get_sentiments(self, d):
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
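        # Tokenise and POS-tag the document (tags are cached in poscache), then look up
        # positive/negative SentiWordNet scores for nouns, adjectives, verbs and adverbs
        # while counting how often each of those POS classes occurs.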
sent = tuple(nltk.word_tokenize(d))
if poscache is not None:
if d in poscache:
tagged = poscache[d]
else:
poscache[d] = tagged = nltk.pos_tag(sent)
else:
tagged = nltk.pos_tag(sent)
pos_vals = []
neg_vals = []
nouns = 0.
adjectives = 0.
verbs = 0.
adverbs = 0.
for w, t in tagged:
p, n = 0, 0
sent_pos_type = None
if t.startswith("NN"):
sent_pos_type = "n"
nouns += 1
elif t.startswith("JJ"):
sent_pos_type = "a"
adjectives += 1
elif t.startswith("VB"):
sent_pos_type = "v"
verbs += 1
elif t.startswith("RB"):
sent_pos_type = "r"
adverbs += 1
if sent_pos_type is not None:
sent_word = "%s/%s" % (sent_pos_type, w)
if sent_word in sent_word_net:
p, n = sent_word_net[sent_word]
pos_vals.append(p)
neg_vals.append(n)
l = len(sent)
avg_pos_val = np.mean(pos_vals)
avg_neg_val = np.mean(neg_vals)
return [1 - avg_pos_val - avg_neg_val, avg_pos_val, avg_neg_val,
nouns / l, adjectives / l, verbs / l, adverbs / l]
def transform(self, documents):
obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs = np.array(
[self._get_sentiments(d) for d in documents]).T
allcaps = []
exclamation = []
question = []
for d in documents:
allcaps.append(
np.sum([t.isupper() for t in d.split() if len(t) > 2]))
exclamation.append(d.count("!"))
question.append(d.count("?"))
result = np.array(
[obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs, allcaps,
exclamation, question]).T
return result
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
    # negative emoticons:
    ":/": " bad ",
    ":>": " sad ",
    ":')": " sad ",
    ":-(": " bad ",
    ":(": " bad ",
    ":s": " bad ",  # :S in lower case (tweets are lower-cased before replacement)
    ":-s": " bad ",  # :-S in lower case
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in list(emo_repl.keys())]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_union_model(params=None):
def preprocessor(tweet):
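        # Normalise the tweet: lower-case it, map emoticons to sentiment words,
        # expand common contractions via regex, and split hyphen/underscore compounds.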
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.items():
tweet = re.sub(r, repl, tweet)
return tweet.replace("-", " ").replace("_", " ")
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
ling_stats = LinguisticVectorizer()
all_features = FeatureUnion(
[('ling', ling_stats), ('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('ling', ling_stats)])
clf = MultinomialNB()
pipeline = Pipeline([('all', all_features), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def __grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(all__tfidf__ngram_range=(1, 2),
all__tfidf__min_df=1,
all__tfidf__stop_words=None,
all__tfidf__smooth_idf=False,
all__tfidf__use_idf=False,
all__tfidf__sublinear_tf=True,
all__tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_union_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
#from sklearn.utils import shuffle
# print "shuffle, sample"
#X_orig, Y_orig = shuffle(X_orig, Y_orig)
#X_orig = X_orig[:100,]
#Y_orig = Y_orig[:100,]
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
json.dump(poscache, open(poscache_filename, "w"))
| mit |
Ziqi-Li/bknqgis | pandas/pandas/tests/frame/test_join.py | 11 | 5226 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import DataFrame, Index, PeriodIndex
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
@pytest.fixture
def frame_with_period_index():
return DataFrame(
data=np.arange(20).reshape(4, 5),
columns=list('abcde'),
index=PeriodIndex(start='2000', freq='A', periods=4))
@pytest.fixture
def frame():
return TestData().frame
@pytest.fixture
def left():
return DataFrame({'a': [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right():
return DataFrame({'b': [300, 100, 200]}, index=[3, 1, 2])
@pytest.mark.parametrize(
"how, sort, expected",
[('inner', False, DataFrame({'a': [20, 10],
'b': [200, 100]},
index=[2, 1])),
('inner', True, DataFrame({'a': [10, 20],
'b': [100, 200]},
index=[1, 2])),
('left', False, DataFrame({'a': [20, 10, 0],
'b': [200, 100, np.nan]},
index=[2, 1, 0])),
('left', True, DataFrame({'a': [0, 10, 20],
'b': [np.nan, 100, 200]},
index=[0, 1, 2])),
('right', False, DataFrame({'a': [np.nan, 10, 20],
'b': [300, 100, 200]},
index=[3, 1, 2])),
('right', True, DataFrame({'a': [10, 20, np.nan],
'b': [100, 200, 300]},
index=[1, 2, 3])),
('outer', False, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3])),
('outer', True, DataFrame({'a': [0, 10, 20, np.nan],
'b': [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3]))])
def test_join(left, right, how, sort, expected):
result = left.join(right, how=how, sort=sort)
tm.assert_frame_equal(result, expected)
def test_join_index(frame):
# left / right
f = frame.loc[frame.index[:10], ['A', 'B']]
f2 = frame.loc[frame.index[5:], ['C', 'D']].iloc[::-1]
joined = f.join(f2)
tm.assert_index_equal(f.index, joined.index)
expected_columns = Index(['A', 'B', 'C', 'D'])
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='left')
tm.assert_index_equal(joined.index, f.index)
tm.assert_index_equal(joined.columns, expected_columns)
joined = f.join(f2, how='right')
tm.assert_index_equal(joined.index, f2.index)
tm.assert_index_equal(joined.columns, expected_columns)
# inner
joined = f.join(f2, how='inner')
tm.assert_index_equal(joined.index, f.index[5:10])
tm.assert_index_equal(joined.columns, expected_columns)
# outer
joined = f.join(f2, how='outer')
tm.assert_index_equal(joined.index, frame.index.sort_values())
tm.assert_index_equal(joined.columns, expected_columns)
tm.assert_raises_regex(
ValueError, 'join method', f.join, f2, how='foo')
# corner case - overlapping columns
for how in ('outer', 'left', 'inner'):
with tm.assert_raises_regex(ValueError, 'columns overlap but '
'no suffix'):
frame.join(frame, how=how)
def test_join_index_more(frame):
af = frame.loc[:, ['A', 'B']]
bf = frame.loc[::2, ['C', 'D']]
expected = af.copy()
expected['C'] = frame['C'][::2]
expected['D'] = frame['D'][::2]
result = af.join(bf)
tm.assert_frame_equal(result, expected)
result = af.join(bf, how='right')
tm.assert_frame_equal(result, expected[::2])
result = bf.join(af, how='right')
tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_join_index_series(frame):
df = frame.copy()
s = df.pop(frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
tm.assert_frame_equal(joined, frame, check_names=False)
s.name = None
tm.assert_raises_regex(ValueError, 'must have a name', df.join, s)
def test_join_overlap(frame):
df1 = frame.loc[:, ['A', 'B', 'C']]
df2 = frame.loc[:, ['B', 'C', 'D']]
joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')
df1_suf = df1.loc[:, ['B', 'C']].add_suffix('_df1')
df2_suf = df2.loc[:, ['B', 'C']].add_suffix('_df2')
no_overlap = frame.loc[:, ['A', 'D']]
expected = df1_suf.join(df2_suf).join(no_overlap)
# column order not necessarily sorted
tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
other = frame_with_period_index.rename(
columns=lambda x: '{key}{key}'.format(key=x))
joined_values = np.concatenate(
[frame_with_period_index.values] * 2, axis=1)
joined_cols = frame_with_period_index.columns.append(other.columns)
joined = frame_with_period_index.join(other)
expected = DataFrame(
data=joined_values,
columns=joined_cols,
index=frame_with_period_index.index)
tm.assert_frame_equal(joined, expected)
| gpl-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont/padova_cont_0/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
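        # Each stored value is log10(4860 * line / H-beta), i.e. the line strength
        # relative to the H-beta (4860 A) reference; non-positive logs are left at zero.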
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
nikken1/patentprocessor | integrate.py | 5 | 12535 | #!/usr/bin/env python
"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro [email protected] github.com/gtfierro
"""
"""
Takes in a CSV file that represents the output of the disambiguation engine:
Patent Number, Firstname, Lastname, Unique_Inventor_ID
Groups by Unique_Inventor_ID and then inserts them into the Inventor table using
lib.alchemy.match
"""
import sys
import lib.alchemy as alchemy
from lib.util.csv_reader import read_file
from lib.alchemy import is_mysql
from lib.alchemy.schema import Inventor, RawInventor, patentinventor, App_Inventor, App_RawInventor, applicationinventor
from lib.handlers.xml_util import normalize_document_identifier
from collections import defaultdict
import cPickle as pickle
import linecache
from datetime import datetime
import pandas as pd
from collections import defaultdict, Counter
from lib.tasks import bulk_commit_inserts, bulk_commit_updates
from unidecode import unidecode
from datetime import datetime
def integrate(disambig_input_file, disambig_output_file):
"""
We have two files: the input to the disambiguator:
uuid, first name, middle name, last name, patent, mainclass, subclass, city, state, country, rawassignee, disambiguated assignee
And the output of the disambiguator:
uuid, unique inventor id
The files will line up line by line, so we can easily get the collection of raw
records that map to a single disambiguated record (D_REC). For each of the raw records
for a given disambiguated id (D_ID), we want to vote the most frequent values for
each of the columns, and use those to populate the D_REC.
    We just have to populate the fields of the disambiguated inventor object:
inventor id, first name, last name, nationality (?)
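    The voting step amounts to a per-column majority vote, e.g. (illustrative only)
        collections.Counter(['John', 'Jon', 'John']).most_common(1)[0][0]  # -> 'John'
    keyed on the disambiguated inventor id; the loops below implement exactly this.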
"""
disambig_input = pd.read_csv(disambig_input_file,header=None,delimiter='\t',encoding='utf-8')
disambig_output = pd.read_csv(disambig_output_file,header=None,delimiter='\t',encoding='utf-8')
disambig_input[0] = disambig_input[0].apply(str)
disambig_output[0] = disambig_output[0].apply(str)
print 'finished loading csvs'
merged = pd.merge(disambig_input, disambig_output, on=0)
merged.columns = ['rawinventor_uuid','isgrant','granted','name_first','name_middle','name_last','patent_id','mainclass','subclass','city','state','country','assignee','rawassignee','prev_inventorid','current_inventorid']
print 'finished merging'
apps = merged[merged['isgrant'] == 0]
inventor_attributes = merged[['isgrant','rawinventor_uuid','current_inventorid','name_first','name_middle','name_last','patent_id']] # rawinventor uuid, inventor id, first name, middle name, last name, patent_id
inventor_attributes = inventor_attributes.dropna(subset=['rawinventor_uuid'],how='all')
inventor_attributes['name_first'] = inventor_attributes['name_first'].fillna('')
inventor_attributes['name_middle'] = inventor_attributes['name_middle'].fillna('')
inventor_attributes['name_last'] = inventor_attributes['name_last'].fillna('')
grants = inventor_attributes[inventor_attributes['isgrant'] == 1]
apps = inventor_attributes[inventor_attributes['isgrant'] == 0]
del grants['isgrant']
del apps['isgrant']
####### DO GRANTS #######
rawinventors = defaultdict(list)
inventor_inserts = []
rawinventor_updates = []
patentinventor_inserts = []
for row in grants.iterrows():
uuid = row[1]['current_inventorid']
rawinventors[uuid].append(row[1])
patentinventor_inserts.append({'inventor_id': uuid, 'patent_id': row[1]['patent_id']})
print 'finished associating ids'
i = 0
for inventor_id in rawinventors.iterkeys():
i += 1
freq = defaultdict(Counter)
param = {}
rawuuids = []
names = []
for raw in rawinventors[inventor_id]:
rawuuids.append(raw[0])
name = ' '.join(x for x in (raw['name_first'], raw['name_middle'], raw['name_last']) if x)
freq['name'][name] += 1
for k,v in raw.iteritems():
freq[k][v] += 1
param['id'] = inventor_id
name = freq['name'].most_common(1)[0][0]
name_first = unidecode(' '.join(name.split(' ')[:-1]))
name_last = unidecode(name.split(' ')[-1])
param['name_first'] = name_first
param['name_last'] = name_last
param['nationality'] = ''
assert set(param.keys()) == {'id','name_first','name_last','nationality'}
inventor_inserts.append(param)
for rawuuid in rawuuids:
rawinventor_updates.append({'pk': rawuuid, 'update': param['id']})
if i % 100000 == 0:
print i, datetime.now(), rawuuids[0]
print 'finished voting'
session_generator = alchemy.session_generator(dbtype='grant')
session = session_generator()
if alchemy.is_mysql():
session.execute('truncate inventor; truncate patent_inventor;')
else:
session.execute('delete from inventor; delete from patent_inventor;')
bulk_commit_inserts(inventor_inserts, Inventor.__table__, is_mysql(), 20000,'grant')
bulk_commit_inserts(patentinventor_inserts, patentinventor, is_mysql(), 20000,'grant')
bulk_commit_updates('inventor_id', rawinventor_updates, RawInventor.__table__, is_mysql(), 20000,'grant')
###### DO APPLICATIONS ######
rawinventors = defaultdict(list)
inventor_inserts = []
rawinventor_updates = []
patentinventor_inserts = []
for row in apps.iterrows():
uuid = row[1]['current_inventorid']
rawinventors[uuid].append(row[1])
patentinventor_inserts.append({'inventor_id': uuid, 'patent_id': row[1]['patent_id']})
print 'finished associating ids'
i = 0
for inventor_id in rawinventors.iterkeys():
i += 1
freq = defaultdict(Counter)
param = {}
rawuuids = []
names = []
for raw in rawinventors[inventor_id]:
rawuuids.append(raw[0])
name = ' '.join(x for x in (raw['name_first'], raw['name_middle'], raw['name_last']) if x)
freq['name'][name] += 1
for k,v in raw.iteritems():
freq[k][v] += 1
param['id'] = inventor_id
name = freq['name'].most_common(1)[0][0]
name_first = unidecode(' '.join(name.split(' ')[:-1]))
name_last = unidecode(name.split(' ')[-1])
param['name_first'] = name_first
param['name_last'] = name_last
param['nationality'] = ''
assert set(param.keys()) == {'id','name_first','name_last','nationality'}
inventor_inserts.append(param)
for rawuuid in rawuuids:
rawinventor_updates.append({'pk': rawuuid, 'update': param['id']})
if i % 100000 == 0:
print i, datetime.now(), rawuuids[0]
print 'finished voting'
session_generator = alchemy.session_generator(dbtype='application')
session = session_generator()
if alchemy.is_mysql():
session.execute('truncate inventor; truncate application_inventor;')
else:
session.execute('delete from inventor; delete from application_inventor;')
bulk_commit_inserts(inventor_inserts, App_Inventor.__table__, is_mysql(), 20000,'application')
bulk_commit_inserts(patentinventor_inserts, applicationinventor, is_mysql(), 20000,'application')
bulk_commit_updates('inventor_id', rawinventor_updates, App_RawInventor.__table__, is_mysql(), 20000,'application')
session_generator = alchemy.session_generator(dbtype='grant')
session = session_generator()
doctype = 'grant'
session.execute('truncate location_assignee;')
res = session.execute('select location_id, assignee_id from patent \
left join rawassignee on rawassignee.patent_id = patent.id \
left join rawlocation on rawlocation.id = rawassignee.rawlocation_id \
where assignee_id != "" and location_id != "";')
assigneelocation = pd.DataFrame.from_records(res.fetchall())
assigneelocation.columns = ['location_id','assignee_id']
assigneelocation = assigneelocation.sort('assignee_id')
print assigneelocation.info()
locationassignee_inserts = [row[1].to_dict() for row in assigneelocation.iterrows()]
bulk_commit_inserts(locationassignee_inserts, alchemy.schema.locationassignee, alchemy.is_mysql(), 20000, 'grant')
session.execute('truncate location_inventor;')
res = session.execute('select location_id, inventor_id from patent \
left join rawinventor on rawinventor.patent_id = patent.id \
left join rawlocation on rawlocation.id = rawinventor.rawlocation_id \
where inventor_id != "" and location_id != "";')
inventorlocation = pd.DataFrame.from_records(res.fetchall())
inventorlocation.columns = ['location_id','inventor_id']
inventorlocation = inventorlocation.sort('inventor_id')
print inventorlocation.info()
locationinventor_inserts = [row[1].to_dict() for row in inventorlocation.iterrows()]
bulk_commit_inserts(locationinventor_inserts, alchemy.schema.locationinventor, alchemy.is_mysql(), 20000, 'grant')
doctype = 'application'
session_generator = alchemy.session_generator(dbtype='application')
session = session_generator()
session.execute('truncate location_assignee;')
res = session.execute('select location_id, assignee_id from application \
left join rawassignee on rawassignee.application_id = application.id \
left join rawlocation on rawlocation.id = rawassignee.rawlocation_id \
where assignee_id != "" and location_id != "";')
assigneelocation = pd.DataFrame.from_records(res.fetchall())
assigneelocation.columns = ['location_id','assignee_id']
assigneelocation = assigneelocation.sort('assignee_id')
print assigneelocation.info()
locationassignee_inserts = [row[1].to_dict() for row in assigneelocation.iterrows()]
bulk_commit_inserts(locationassignee_inserts, alchemy.schema.app_locationassignee, alchemy.is_mysql(), 20000, 'application')
session.execute('truncate location_inventor;')
res = session.execute('select location_id, inventor_id from application \
left join rawinventor on rawinventor.application_id = application.id \
left join rawlocation on rawlocation.id = rawinventor.rawlocation_id \
where inventor_id != "" and location_id != "";')
inventorlocation = pd.DataFrame.from_records(res.fetchall())
inventorlocation.columns = ['location_id','inventor_id']
inventorlocation = inventorlocation.sort('inventor_id')
print inventorlocation.info()
locationinventor_inserts = [row[1].to_dict() for row in inventorlocation.iterrows()]
bulk_commit_inserts(locationinventor_inserts, alchemy.schema.app_locationinventor, alchemy.is_mysql(), 20000, 'application')
def main():
if len(sys.argv) <= 2:
print 'USAGE: python integrate.py <disambig input file> <disambig output file>'
sys.exit()
dis_in = sys.argv[1]
dis_out = sys.argv[2]
integrate(dis_in,dis_out)
if __name__ == '__main__':
main()
| bsd-2-clause |
mehdidc/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
BhallaLab/moose | moose-examples/tutorials/ExcInhNet/ExcInhNet_Ostojic2014_Brunel2000_brian2.py | 2 | 8226 | '''
The LIF network is based on:
Ostojic, S. (2014).
Two types of asynchronous activity in networks of
excitatory and inhibitory spiking neurons.
Nat Neurosci 17, 594-600.
Key parameter to change is synaptic coupling J (mV).
Tested with Brian 1.4.1
Written by Aditya Gilra, CAMP 2014, Bangalore, 20 June, 2014.
Updated to match MOOSE implementation by Aditya Gilra, Jan, 2015.
Currently, simtime and dt are modified to compare across MOOSE, Brian1 and Brian2.
'''
# import modules and functions to be used
# 'from brian2 import *' below also imports pylab-like plotting commands into
# the namespace; np. can then be used for numpy and mpl. for matplotlib.
try:
from brian2 import * # importing brian also does:
except ImportError as e:
print( "[INFO ] brian2 is not found." )
quit()
#prefs.codegen.target='numpy'
#prefs.codegen.target='weave'
set_device('cpp_standalone')
import random
import time
np.random.seed(100) # set seed for reproducibility of simulations
random.seed(100) # set seed for reproducibility of simulations
# ###########################################
# Simulation parameters
# ###########################################
simdt = 0.01*ms
simtime = 10.0*second # Simulation time
defaultclock.dt = simdt # Brian's default sim time step
dt = defaultclock.dt/second # convert to value in seconds
# ###########################################
# Neuron model
# ###########################################
# equation: dv/dt=(1/taum)*(-(v-el))
# with spike when v>vt, reset to vr
el = -65.*mV # Resting potential
vt = -45.*mV # Spiking threshold
taum = 20.*ms # Membrane time constant
vr = -55.*mV # Reset potential
inp = 20.1*mV/taum # input I/C to each neuron
# same as setting el=-41 mV and inp=0
taur = 0.5*ms # Refractory period
taudelay = 0.5*ms + dt*second # synaptic delay
eqs_neurons='''
dv/dt=(1/taum)*(-(v-el))+inp : volt
'''
# ###########################################
# Network parameters: numbers
# ###########################################
N = 1000 # Total number of neurons
fexc = 0.8 # Fraction of exc neurons
NE = int(fexc*N) # Number of excitatory cells
NI = N-NE # Number of inhibitory cells
# ###########################################
# Network parameters: synapses
# ###########################################
C = 100 # Number of incoming connections on each neuron (exc or inh)
fC = fexc # fraction fC incoming connections are exc, rest inhibitory
excC = int(fC*C) # number of exc incoming connections
J = 0.8*mV # exc strength is J (in mV as we add to voltage)
# Critical J is ~ 0.45 mV in paper for N = 1000, C = 1000
g = 5.0 # -gJ is the inh strength. For exc-inh balance g >~ f/(1-f) = 4
# ###########################################
# Initialize neuron (sub)groups
# ###########################################
P=NeuronGroup(N,model=eqs_neurons,\
threshold='v>=vt',reset='v=vr',refractory=taur,method='euler')
# not distributing uniformly to ensure match with MOOSE
#Pe.v = uniform(el,vt+10*mV,NE)
#Pi.v = uniform(el,vt+10*mV,NI)
P.v = linspace(el/mV-20,vt/mV,N)*mV
# ###########################################
# Connecting the network
# ###########################################
sparseness_e = fC*C/float(NE)
sparseness_i = (1-fC)*C/float(NI)
# Follow Dale's law -- exc (inh) neurons only have +ve (-ve) synapses
# hence need to set w correctly (always set after creating connections)
con = Synapses(P,P,'w:volt',pre='v_post+=w',method='euler')
# I don't use Brian's connect_random,
# instead I use the same algorithm and seed as in the MOOSE version
#con_e.connect_random(sparseness=sparseness_e)
#con_i.connect_random(sparseness=sparseness_i)
## Connections from some Exc/Inh neurons to each neuron
random.seed(100) # set seed for reproducibility of simulations
conn_i = []
conn_j = []
for j in range(0,N):
## draw excC number of neuron indices out of NmaxExc neurons
preIdxsE = random.sample(list(range(NE)),excC)
## draw inhC=C-excC number of neuron indices out of inhibitory neurons
preIdxsI = random.sample(list(range(NE,N)),C-excC)
## connect these presynaptically to i-th post-synaptic neuron
## choose the synapses object based on whether post-syn nrn is exc or inh
conn_i += preIdxsE
conn_j += [j]*excC
conn_i += preIdxsI
conn_j += [j]*(C-excC)
con.connect(conn_i,conn_j)
con.delay = taudelay
con.w['i<NE'] = J
con.w['i>=NE'] = -g*J
# ###########################################
# Setting up monitors
# ###########################################
Nmon = N
sm = SpikeMonitor(P)
# Population monitor
popm = PopulationRateMonitor(P)
# voltage monitor
sm_vm = StateMonitor(P,'v',record=list(range(10))+list(range(NE,NE+10)))
# ###########################################
# Simulate
# ###########################################
print(("Setup complete, running for",simtime,"at dt =",dt,"s."))
t1 = time.time()
run(simtime,report='text')
device.build(directory='output', compile=True, run=True, debug=False)
print('inittime + runtime, t = ', time.time() - t1)
#print "For g,J =",g,J,"mean exc rate =",\
# sm_e.num_spikes/float(NE)/(simtime/second),'Hz.'
#print "For g,J =",g,J,"mean inh rate =",\
# sm_i.num_spikes/float(NI)/(simtime/second),'Hz.'
# ###########################################
# Analysis functions
# ###########################################
tau=50e-3
sigma = tau/2.
# normalized Gaussian kernel, integral with dt is normed to 1
# to count as 1 spike smeared over a finite interval
norm_factor = 1./(sqrt(2.*pi)*sigma)
gauss_kernel = array([norm_factor*exp(-x**2/(2.*sigma**2))\
for x in arange(-5.*sigma,5.*sigma+dt,dt)])
def rate_from_spiketrain(spikemon,fulltime,nrnidx=None):
"""
Returns a rate series of spiketimes convolved with a Gaussian kernel;
all times must be in SI units,
remember to divide fulltime and dt by second
"""
if nrnidx is None:
spiketimes = spikemon.t # take spiketimes of all neurons
else:
# take spiketimes of only neuron index nrnidx
spiketimes = spikemon.t[where(spikemon.i==nrnidx)[0]]
kernel_len = len(gauss_kernel)
# need to accommodate half kernel_len on either side of fulltime
rate_full = zeros(int(fulltime/dt)+kernel_len)
for spiketime in spiketimes:
idx = int(spiketime/dt)
rate_full[idx:idx+kernel_len] += gauss_kernel
# only the middle fulltime part of the rate series
# This is already in Hz,
# since should have multiplied by dt for above convolution
# and divided by dt to get a rate, so effectively not doing either.
    return rate_full[kernel_len//2:kernel_len//2+int(fulltime/dt)]
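# Illustrative alternative (not used by the plotting code below): the same
# Gaussian smoothing can be written as a discrete convolution of a binned
# spike train with gauss_kernel; np is the numpy alias exported by brian2,
# as already used above. `spiketimes` here is a plain spike-time array
# (e.g. sm.t), not the monitor object.
def rate_from_binned_convolution(spiketimes, fulltime):
    """Bin spikes at resolution dt and convolve with gauss_kernel (sketch)."""
    binned = np.zeros(int(fulltime/dt))
    for spiketime in spiketimes:
        binned[min(int(spiketime/dt), len(binned) - 1)] += 1
    return np.convolve(binned, gauss_kernel, mode='same')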
# ###########################################
# Make plots
# ###########################################
fig = figure()
# Vm plots
timeseries = arange(0,simtime/second+dt,dt)
for i in range(3):
plot(timeseries[:len(sm_vm.t)],sm_vm[i].v)
fig = figure()
# raster plots
subplot(231)
plot(sm.t,sm.i,',')
title(str(N)+" exc & inh neurons")
xlim([0,simtime/second])
xlabel("")
print("plotting firing rates")
subplot(232)
# firing rates
timeseries = arange(0,simtime/second+dt,dt)
num_to_plot = 10
#rates = []
for nrni in range(num_to_plot):
rate = rate_from_spiketrain(sm,simtime/second,nrni)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_e[nrni])
#rates.append(rate)
title(str(num_to_plot)+" exc rates")
ylabel("Hz")
ylim(0,300)
subplot(235)
for nrni in range(NE,NE+num_to_plot):
rate = rate_from_spiketrain(sm,simtime/second,nrni)
plot(timeseries[:len(rate)],rate)
#print mean(rate),len(sm_i[nrni])
#rates.append(rate)
title(str(num_to_plot)+" inh rates")
ylim(0,300)
#print "Mean rate = ",mean(rates)
xlabel("Time (s)")
ylabel("Hz")
print("plotting pop firing rates")
# Population firing rates
subplot(233)
timeseries = arange(0,simtime/second,dt)
#plot(timeseries,popm_e.smooth_rate(width=50.*ms,filter="gaussian"),color='grey')
rate = rate_from_spiketrain(sm,simtime/second)/float(N)
plot(timeseries[:len(rate)],rate)
title("population rate")
ylabel("Hz")
xlabel("Time (s)")
fig.tight_layout()
show()
| gpl-3.0 |
mrahim/adni_petmr_analysis | classification_spn_fmri_stacking.py | 1 | 3078 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 26 14:56:00 2015
@author: [email protected]
"""
import os, time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import RidgeCV, LogisticRegression
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.datasets.base import Bunch
from nilearn.decoding import SpaceNetRegressor
from fetch_data import fetch_adni_petmr
from fetch_data import set_cache_base_dir, set_features_base_dir,\
set_group_indices, array_to_niis, fetch_adni_masks
def train_and_test(X, y, mask, train, test):
x_train_stacked = []
x_test_stacked = []
coeffs = []
y_train, y_test = y[train], y[test]
for k in range(X.shape[2]):
x = X[...,k]
x_train, x_test = x[train], x[test]
spn = SpaceNetRegressor(penalty='tv-l1', mask=mask,
cv=8, max_iter=400)
x_train_img = array_to_niis(x_train, mask)
spn.fit(x_train_img, y_train)
x_train_stacked.append(spn.predict(x_train_img))
x_test_img = array_to_niis(x_test, mask)
x_test_stacked.append(spn.predict(x_test_img))
coeffs.append(spn.coef_)
x_train_ = np.asarray(x_train_stacked).T[0, ...]
x_test_ = np.asarray(x_test_stacked).T[0, ...]
lgr = LogisticRegression()
lgr.fit(x_train_, y_train)
probas = lgr.decision_function(x_test_)
scores = lgr.score(x_test_, y_test)
coeff_lgr = lgr.coef_
B = Bunch(score=scores, proba=probas, coeff=coeffs, coeff_lgr=coeff_lgr)
ts = str(int(time.time()))
np.savez_compressed(os.path.join(CACHE_DIR, 'spacenet_stacking_fmri_tv_' + ts),
data=B)
return B
###########################################################################
###########################################################################
### set paths
CACHE_DIR = set_cache_base_dir()
FIG_DIR = os.path.join(CACHE_DIR, 'figures', 'petmr')
FEAT_DIR = set_features_base_dir()
FMRI_DIR = os.path.join(FEAT_DIR, 'smooth_preproc', 'fmri_subjects_68seeds')
### load dataset
mask = fetch_adni_masks()
dataset = fetch_adni_petmr()
fmri = dataset['func']
subj_list = dataset['subjects']
idx = set_group_indices(dataset['dx_group'])
idx_ = np.hstack((idx['EMCI'][0], idx['LMCI'][0]))
img_idx = np.hstack((idx['AD'][0], idx_))
X = []
print 'Loading data ...'
for i in img_idx:
X.append(np.load(os.path.join(FMRI_DIR, subj_list[i]+'.npz'))['corr'])
# X.shape = (n_samples, n_features, n_rois)
X = np.array(X)
y = np.ones(X.shape[0])
y[len(y) - len(idx_):] = 0
print 'Classification ...'
n_iter = 100
sss = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=.2,
random_state=np.random.seed(42))
from joblib import Parallel, delayed
p = Parallel(n_jobs=20, verbose=5)(delayed(train_and_test)(X, y, mask['mask_petmr'], train, test)\
for train, test in sss)
np.savez_compressed(os.path.join(CACHE_DIR, 'spacenet_stacking_fmri_tv_'+str(n_iter)),data=p)
| bsd-2-clause |
Vimos/scikit-learn | examples/svm/plot_svm_kernels.py | 96 | 2019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
ch710798472/GithubRecommended | RecGithub/function/display.py | 1 | 1975 | # -*- coding: utf-8 -*-
'''
Copyright@USTC SSE
Author: ch yy, Suzhou, 08/12/2015
Use d3.js to display
'''
import numpy as np
import d3py
import pandas
import knn as knn1
import fptree as fptree1
import svd as svd1
import networkx as nx
import json
import webbrowser
import os
from networkx.readwrite import json_graph
def knn(ipaddress = "localhost",port = "9999"):
'''
Display the results of the KNN algorithm using D3.js
:return:
'''
testNum,errorRate, errorCount, classifierData, realData = knn1.displayData(
'data/edx_knn.csv');
x = np.linspace(0,testNum,testNum)
df = pandas.DataFrame({
'x' : x,
'y' : classifierData[:testNum],
'z' : realData[:testNum],
})
print "testNummber = %d \n" % testNum, "error rate : %f \n" % (errorCount/float(testNum)), "error count:%d \n" % errorCount
webbrowser.open_new_tab("http://%s:%s/%s.html" % (ipaddress, port, "disply_knn"))
with d3py.PandasFigure(df, 'disply_knn', width=20000, height=200, port = int(port)) as fig:
fig += d3py.geoms.Line('x', 'y', stroke='BlueViolet')
fig += d3py.geoms.Line('x', 'z', stroke='DeepPink')
fig += d3py.xAxis('x', label="test number")
fig += d3py.yAxis('y', label="test label")
fig.show()
def githubRec(ipaddress = "localhost",port = "8989"):
'''
Restore and display the recommendation graph saved after each processing step
:return:
'''
g = nx.read_gpickle("data/github.1")
print nx.info(g)
print
mtsw_users = [n for n in g if g.node[n]['type'] == 'user']
h = g.subgraph(mtsw_users)
print nx.info(h)
print
d = json_graph.node_link_data(h)
json.dump(d, open('data/githubRec.json', 'w'))
# cmdstr = "python3 -m http.server %s" % port
webbrowser.open_new_tab("http://%s:%s/%s.html"%(ipaddress,port, "display_githubRec"))
# os.system(cmdstr)
def fptree():
fptree1.start_test()
def svd():
svd1.start_test()
if __name__ == '__main__':
githubRec()
| mit |
sergiopasra/numina | numina/array/display/ximplotxy.py | 3 | 5275 | #
# Copyright 2015-2016 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
from .matplotlib_qt import set_window_geometry
from .pause_debugplot import pause_debugplot
def ximplotxy_jupyter(x, y, fmt=None, **args):
"""Auxiliary function to call ximplotxy from a jupyter notebook.
"""
using_jupyter = True
if fmt is None:
return ximplotxy(x, y, using_jupyter=using_jupyter, **args)
else:
return ximplotxy(x, y, fmt, using_jupyter=using_jupyter, **args)
def ximplotxy(x, y, fmt=None, plottype=None,
xlim=None, ylim=None,
xlabel=None, ylabel=None, title=None,
show=True, geometry=(0, 0, 640, 480), tight_layout=True,
debugplot=0, using_jupyter=False,
**kwargs):
"""
Parameters
----------
x : 1d numpy array, float
Array containing the X coordinate.
y : 1d numpy array, float
Array containing the Y coordinate.
fmt : str, optional
Format string for quickly setting basic line properties.
plottype : string
Plot type. It can be 'loglog', 'semilogx', 'semilogy' or None
(default, non-logarithmic plot).
xlim : tuple of floats
Tuple defining the x-axis range.
ylim : tuple of floats
Tuple defining the y-axis range.
xlabel : string
X-axis label.
ylabel : string
Y-axis label.
title : string
Plot title.
show : bool
If True, the function shows the displayed image. Otherwise
plt.show() is expected to be executed outside.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
tight_layout : bool
If True, and show=True, a tight display layout is set.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
using_jupyter : bool
If True, this function is called from a jupyter notebook.
Returns
-------
ax : axes object
Matplotlib axes instance. This value is returned only when
'show' is False.
"""
from numina.array.display.matplotlib_qt import plt
if not show and using_jupyter:
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
if plottype == 'loglog':
if fmt is None:
ax.loglog(x, y, **kwargs)
else:
ax.loglog(x, y, fmt, **kwargs)
elif plottype == 'semilogx':
if fmt is None:
ax.semilogx(x, y, **kwargs)
else:
ax.semilogx(x, y, fmt, **kwargs)
elif plottype == 'semilogy':
if fmt is None:
ax.semilogy(x, y, **kwargs)
else:
ax.semilogy(x, y, fmt, **kwargs)
    elif plottype is None:
if fmt is None:
ax.plot(x, y, **kwargs)
else:
ax.plot(x, y, fmt, **kwargs)
else:
raise ValueError('Invalid plottype: ' + str(plottype))
if xlim is not None:
ax.set_xlim(xlim[0], xlim[1])
if ylim is not None:
ax.set_ylim(ylim[0], ylim[1])
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)
set_window_geometry(geometry)
if show:
pause_debugplot(debugplot, pltshow=show, tight_layout=tight_layout)
else:
if tight_layout:
plt.tight_layout()
# return axes
if using_jupyter:
plt.ion()
return ax
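# Minimal usage sketch (illustrative, not part of the original module): plot
# y = x**2 with a logarithmic y axis; the call pauses/shows according to
# debugplot, just as main() below does.
def _example_ximplotxy():
    xdum = np.linspace(1.0, 10.0, 50)
    ximplotxy(xdum, xdum ** 2, 'o-', plottype='semilogy',
              xlabel='x', ylabel='x squared', title='ximplotxy example',
              debugplot=12)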
def main(args=None):
# parse command-line options
parser = argparse.ArgumentParser(prog='ximplotxy')
parser.add_argument("filename",
help="ASCII file with data in columns")
parser.add_argument("col1",
help="Column number for X data",
type=int)
parser.add_argument("col2",
help="Column number for Y data",
type=int)
parser.add_argument("--kwargs",
help="Extra arguments for plot, e.g.: "
"\"{'marker':'o',"
" 'linestyle':'dotted',"
" 'xlabel':'x axis', 'ylabel':'y axis',"
" 'title':'sample plot',"
" 'xlim':[-1,1], 'ylim':[-2,2],"
" 'label':'sample data',"
" 'color':'magenta'}\"")
args = parser.parse_args(args)
# ASCII file
filename = args.filename
# columns to be plotted (first column will be number 1 and not 0)
col1 = args.col1 - 1
col2 = args.col2 - 1
# read ASCII file
bigtable = np.genfromtxt(filename)
x = bigtable[:, col1]
y = bigtable[:, col2]
if args.kwargs is None:
ximplotxy(x, y, debugplot=12, marker='o', linestyle='')
else:
ximplotxy(x, y, debugplot=12, **eval(args.kwargs))
if __name__ == '__main__':
main()
| gpl-3.0 |
jenfly/monsoon-onset | scripts/save-dp.py | 1 | 2356 | """
Calculate d/dp from downloaded pressure level variable and save to files
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import xarray as xray
import pandas as pd
import collections
import atmos as atm
# ----------------------------------------------------------------------
version = 'merra2'
datadir = atm.homedir() + 'datastore/%s/daily/' % version
savedir = datadir
years = np.arange(1980, 2010)
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
pdim = 1
varnms = ['U', 'OMEGA']
def datafile(datadir, varnm, plev, year, version):
latlonstr = '40E-120E_90S-90N'
filenm = '%s_%s%d_%s_%d.nc' % (version, varnm, plev, latlonstr, year)
filenm = datadir + filenm
return filenm
def concat_plevs(datadir, year, varnm, plevs, pdim, version):
pname = 'Height'
for i, plev in enumerate(plevs):
filenm = datafile(datadir, varnm, plev, year, version)
print('Reading ' + filenm)
with xray.open_dataset(filenm) as ds:
var_in = ds[varnm].load()
var_in = atm.expand_dims(var_in, pname, plev, axis=1)
if i == 0:
var = var_in
else:
var = xray.concat([var, var_in], dim=pname)
return var
def calc_dp(var, plev):
"""Extract subset of pressure levels and calculate d/dp."""
plevs = atm.get_coord(var, 'plev')
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
ind = (list(plevs)).index(plev)
i1 = max(0, ind - 1)
i2 = min(len(plevs) - 1, ind + 1) + 1
psub = plevs[i1:i2]
varsub = var.sel(**{pname : psub})
pres = atm.pres_convert(psub, 'hPa', 'Pa')
atm.disptime()
print('Computing d/dp for pressure level %d' % plev)
dvar = atm.gradient(varsub, pres, axis=pdim)
dvar = dvar.sel(**{pname : plev})
dvar.name = 'D%sDP' % var.name
atm.disptime()
return dvar
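# Illustrative sketch (not part of the original script): atm.gradient above
# presumably computes a finite-difference derivative along the pressure axis;
# with plain numpy (>= 1.13 for non-uniform spacing) it would look roughly
# like this, where `values` is a plain array and `pres_pa` the pressure
# coordinate in Pa (both hypothetical names used only for this example).
def _ddp_numpy_sketch(values, pres_pa, axis=1):
    """Centered-difference d(values)/dp along `axis` (sketch)."""
    return np.gradient(values, pres_pa, axis=axis)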
# Compute d/dp and save
for year in years:
for varnm in varnms:
var = concat_plevs(datadir, year, varnm, plevs, pdim, version)
for plev in plevs:
dvar = calc_dp(var, plev)
filenm = datafile(savedir, 'D%sDP' % varnm, plev, year, version)
print('Saving to ' + filenm)
atm.save_nc(filenm, dvar)
| mit |
alekz112/statsmodels | statsmodels/sandbox/nonparametric/tests/ex_smoothers.py | 33 | 1413 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 04 10:51:39 2011
@author: josef
"""
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.sandbox.nonparametric import smoothers, kernels
from statsmodels.regression.linear_model import OLS, WLS
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 100
lb, ub = -1, 2
x = np.linspace(lb, ub, nobs)
x = np.sin(x)
exog = x[:,None]**np.arange(order+1)
y_true = exog.sum(1)
y = y_true + sigma_noise * np.random.randn(nobs)
#xind = np.argsort(x)
pmod = smoothers.PolySmoother(2, x)
pmod.fit(y) #no return
y_pred = pmod.predict(x)
error = y - y_pred
mse = (error*error).mean()
print(mse)
res_ols = OLS(y, exog[:,:3]).fit()
print(np.squeeze(pmod.coef) - res_ols.params)
weights = np.ones(nobs)
weights[:nobs//3] = 0.1
weights[-nobs//5:] = 2
pmodw = smoothers.PolySmoother(2, x)
pmodw.fit(y, weights=weights) #no return
y_predw = pmodw.predict(x)
error = y - y_predw
mse = (error*error).mean()
print(mse)
res_wls = WLS(y, exog[:,:3], weights=weights).fit()
print(np.squeeze(pmodw.coef) - res_wls.params)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.plot(y, '.')
plt.plot(y_true, 'b-', label='true')
plt.plot(y_pred, '-', label='poly')
plt.plot(y_predw, '-', label='poly -w')
plt.legend(loc='upper left')
plt.close()
#plt.show()
| bsd-3-clause |
jseabold/statsmodels | statsmodels/stats/tests/test_anova.py | 5 | 18927 | # -*- coding: utf-8 -*-
from io import StringIO
import numpy as np
from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
from pandas import read_csv
kidney_table = StringIO("""Days Duration Weight ID
0.0 1 1 1
2.0 1 1 2
1.0 1 1 3
3.0 1 1 4
0.0 1 1 5
2.0 1 1 6
0.0 1 1 7
5.0 1 1 8
6.0 1 1 9
8.0 1 1 10
2.0 1 2 1
4.0 1 2 2
7.0 1 2 3
12.0 1 2 4
15.0 1 2 5
4.0 1 2 6
3.0 1 2 7
1.0 1 2 8
5.0 1 2 9
20.0 1 2 10
15.0 1 3 1
10.0 1 3 2
8.0 1 3 3
5.0 1 3 4
25.0 1 3 5
16.0 1 3 6
7.0 1 3 7
30.0 1 3 8
3.0 1 3 9
27.0 1 3 10
0.0 2 1 1
1.0 2 1 2
1.0 2 1 3
0.0 2 1 4
4.0 2 1 5
2.0 2 1 6
7.0 2 1 7
4.0 2 1 8
0.0 2 1 9
3.0 2 1 10
5.0 2 2 1
3.0 2 2 2
2.0 2 2 3
0.0 2 2 4
1.0 2 2 5
1.0 2 2 6
3.0 2 2 7
6.0 2 2 8
7.0 2 2 9
9.0 2 2 10
10.0 2 3 1
8.0 2 3 2
12.0 2 3 3
3.0 2 3 4
7.0 2 3 5
15.0 2 3 6
4.0 2 3 7
9.0 2 3 8
6.0 2 3 9
1.0 2 3 10
""")
kidney_table.seek(0)
kidney_table = read_csv(kidney_table, sep=r"\s+", engine='python').astype(int)
class TestAnovaLM(object):
@classmethod
def setup_class(cls):
# kidney data taken from JT's course
# do not know the license
cls.data = kidney_table
cls.kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight)',
data=cls.data).fit()
def test_results(self):
Df = np.array([1, 2, 2, 54])
sum_sq = np.array([2.339693, 16.97129, 0.6356584, 28.9892])
mean_sq = np.array([2.339693, 8.485645, 0.3178292, 0.536837])
f_value = np.array([4.358293, 15.80674, 0.5920404, np.nan])
pr_f = np.array([0.0415617, 3.944502e-06, 0.5567479, np.nan])
results = anova_lm(self.kidney_lm)
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, sum_sq, 4)
np.testing.assert_almost_equal(results['F'].values, f_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, pr_f)
class TestAnovaLMNoconstant(object):
@classmethod
def setup_class(cls):
# kidney data taken from JT's course
# do not know the license
cls.data = kidney_table
cls.kidney_lm = ols('np.log(Days+1) ~ C(Duration) * C(Weight) - 1',
data=cls.data).fit()
def test_results(self):
Df = np.array([2, 2, 2, 54])
sum_sq = np.array([158.6415227, 16.97129, 0.6356584, 28.9892])
mean_sq = np.array([79.3207613, 8.485645, 0.3178292, 0.536837])
f_value = np.array([147.7557648, 15.80674, 0.5920404, np.nan])
pr_f = np.array([1.262324e-22, 3.944502e-06, 0.5567479, np.nan])
results = anova_lm(self.kidney_lm)
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, sum_sq, 4)
np.testing.assert_almost_equal(results['F'].values, f_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, pr_f)
# > sum2.lm = lm(logDays ~ Duration * Weight - 1, contrasts=list(Duration=contr.sum, Weight=contr.sum))
# > anova.lm.sum2 <- anova(sum2.lm)
# > anova.lm.sum2
# Analysis of Variance Table
#
# Response: logDays
# Df Sum Sq Mean Sq F value Pr(>F)
# Duration 2 158.642 79.321 147.756 < 2.2e-16 ***
# Weight 2 16.971 8.486 15.807 3.945e-06 ***
# Duration:Weight 2 0.636 0.318 0.592 0.5567
# Residuals 54 28.989 0.537
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
class TestAnovaLMCompare(TestAnovaLM):
def test_results(self):
new_model = ols("np.log(Days+1) ~ C(Duration) + C(Weight)",
self.data).fit()
results = anova_lm(new_model, self.kidney_lm)
Res_Df = np.array([
56, 54
])
RSS = np.array([
29.62486, 28.9892
])
Df = np.array([
0, 2
])
Sum_of_Sq = np.array([
np.nan, 0.6356584
])
F = np.array([
np.nan, 0.5920404
])
PrF = np.array([
np.nan, 0.5567479
])
np.testing.assert_equal(results["df_resid"].values, Res_Df)
np.testing.assert_almost_equal(results["ssr"].values, RSS, 4)
np.testing.assert_almost_equal(results["df_diff"].values, Df)
np.testing.assert_almost_equal(results["ss_diff"].values, Sum_of_Sq)
np.testing.assert_almost_equal(results["F"].values, F)
np.testing.assert_almost_equal(results["Pr(>F)"].values, PrF)
class TestAnovaLMCompareNoconstant(TestAnovaLM):
def test_results(self):
new_model = ols("np.log(Days+1) ~ C(Duration) + C(Weight) - 1",
self.data).fit()
results = anova_lm(new_model, self.kidney_lm)
Res_Df = np.array([
56, 54
])
RSS = np.array([
29.62486, 28.9892
])
Df = np.array([
0, 2
])
Sum_of_Sq = np.array([
np.nan, 0.6356584
])
F = np.array([
np.nan, 0.5920404
])
PrF = np.array([
np.nan, 0.5567479
])
np.testing.assert_equal(results["df_resid"].values, Res_Df)
np.testing.assert_almost_equal(results["ssr"].values, RSS, 4)
np.testing.assert_almost_equal(results["df_diff"].values, Df)
np.testing.assert_almost_equal(results["ss_diff"].values, Sum_of_Sq)
np.testing.assert_almost_equal(results["F"].values, F)
np.testing.assert_almost_equal(results["Pr(>F)"].values, PrF)
class TestAnova2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
3.067066, 13.27205, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F_value = np.array([
5.667033, 12.26141, 0.1760025, np.nan
])
PrF = np.array([
0.02106078, 4.487909e-05, 0.8391231, np.nan
])
results = anova_lm(anova_ii, typ="II")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2Noconstant(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum) - 1",
data).fit()
Sum_Sq = np.array([
154.7131692, 13.27205, 0.1905093, 27.60181
])
Df = np.array([
2, 2, 2, 51
])
F_value = np.array([
142.9321191, 12.26141, 0.1760025, np.nan
])
PrF = np.array([
1.238624e-21, 4.487909e-05, 0.8391231, np.nan
])
results = anova_lm(anova_ii, typ="II")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
# > sum2.lm.dropped <- lm(logDays ~ Duration * Weight - 1, dta.dropped,
# contrasts=list(Duration=contr.sum, Weight=contr.sum))
# > anova.ii.dropped2 <- Anova(sum2.lm.dropped, type='II')
# > anova.ii.dropped2
# Anova Table (Type II tests)
#
# Response: logDays
# Sum Sq Df F value Pr(>F)
# Duration 154.713 2 142.932 < 2.2e-16 ***
# Weight 13.272 2 12.261 4.488e-05 ***
# Duration:Weight 0.191 2 0.176 0.8391
# Residuals 27.602 51
class TestAnova2HC0(TestAnovaLM):
#NOTE: R does not return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.972744, 13.7804, 0.1709936, np.nan
])
PrF = np.array([
0.01095599, 1.641682e-05, 0.8433081, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.238771, 12.32983, 0.1529943, np.nan
])
PrF = np.array([
0.01576555, 4.285456e-05, 0.858527, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
6.267499, 12.25354, 0.1501224, np.nan
])
PrF = np.array([
0.01554009, 4.511826e-05, 0.8609815, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova2HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_ii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 2, 2, 51
])
F = np.array([
5.633786, 10.89842, 0.1317223, np.nan
])
PrF = np.array([
0.02142223, 0.0001145965, 0.8768817, np.nan
])
results = anova_lm(anova_ii, typ="II", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F_value = np.array([
279.7545, 5.367071, 12.43245, 0.1760025, np.nan
])
PrF = np.array([
2.379855e-22, 0.02457384, 3.999431e-05, 0.8391231, np.nan
])
results = anova_lm(anova_iii, typ="III")
np.testing.assert_equal(results['df'].values, Df)
np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F_value, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC0(TestAnovaLM):
#NOTE: R does not return SSq with robust covariance. Why?
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
298.3404, 5.723638, 13.76069, 0.1709936, np.nan
])
PrF = np.array([
5.876255e-23, 0.02046031, 1.662826e-05, 0.8433081, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc0")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC1(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
266.9361, 5.12115, 12.3122, 0.1529943, np.nan
])
PrF = np.array([
6.54355e-22, 0.02792296, 4.336712e-05, 0.858527, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc1")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC2(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
264.5137, 5.074677, 12.19158, 0.1501224, np.nan
])
PrF = np.array([
7.958286e-22, 0.02860926, 4.704831e-05, 0.8609815, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc2")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
class TestAnova3HC3(TestAnovaLM):
# drop some observations to make an unbalanced, disproportionate panel
# to make sure things are okay
def test_results(self):
data = self.data.drop([0,1,2])
anova_iii = ols("np.log(Days+1) ~ C(Duration, Sum)*C(Weight, Sum)",
data).fit()
Sum_Sq = np.array([
151.4065, 2.904723, 13.45718, 0.1905093, 27.60181
])
Df = np.array([
1, 1, 2, 2, 51
])
F = np.array([
234.4026, 4.496996, 10.79903, 0.1317223, np.nan
])
PrF = np.array([
1.037224e-20, 0.03883841, 0.0001228716, 0.8768817, np.nan
])
results = anova_lm(anova_iii, typ="III", robust="hc3")
np.testing.assert_equal(results['df'].values, Df)
#np.testing.assert_almost_equal(results['sum_sq'].values, Sum_Sq, 4)
np.testing.assert_almost_equal(results['F'].values, F, 4)
np.testing.assert_almost_equal(results['PR(>F)'].values, PrF)
| bsd-3-clause |
gibiansky/tensorflow | tensorflow/contrib/learn/python/learn/grid_search_test.py | 27 | 2080 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Grid search tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
HAS_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if HAS_SKLEARN:
try:
# pylint: disable=g-import-not-at-top
from sklearn import datasets
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
except ImportError:
HAS_SKLEARN = False
class GridSearchTest(tf.test.TestCase):
"""Grid search tests."""
def testIrisDNN(self):
if HAS_SKLEARN:
random.seed(42)
iris = datasets.load_iris()
feature_columns = learn.infer_real_valued_columns_from_input(iris.data)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10],
n_classes=3)
grid_search = GridSearchCV(classifier,
{'hidden_units': [[5, 5], [10, 10]]},
scoring='accuracy',
fit_params={'steps': [50]})
grid_search.fit(iris.data, iris.target)
score = accuracy_score(iris.target, grid_search.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
fbagirov/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
rcrowder/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_template.py | 70 | 8806 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can
use any module in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, eg::
python simple_plot.py -dmodule://my_backend
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import division
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
documentation of the class's methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, master_transform, cliprect, clippath,
# clippath_trans, paths, all_transforms, offsets,
# offsetTrans, facecolors, edgecolors, linewidths,
# linestyles, antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, master_transform, cliprect, clippath,
# clippath_trans, meshWidth, meshHeight, coordinates,
# offsets, offsetTrans, facecolors, antialiased,
# showedges):
# pass
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
pass
def flipy(self):
return True
def get_canvas_width_height(self):
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as an RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
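# Illustrative sketch (not part of the template): if you do the mapping at the
# GC level (as the GTK backend does), you typically override the setters and
# translate matplotlib's values into backend-specific constants.  The integer
# codes below are made up for this example.
class _GraphicsContextMappingExample(GraphicsContextBase):
    _capd = {'butt': 0, 'projecting': 1, 'round': 2}  # hypothetical backend codes
    def set_capstyle(self, cs):
        GraphicsContextBase.set_capstyle(self, cs)      # keep base-class state
        self._backend_cap = self._capd[self._capstyle]  # forward the mapped value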
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overridden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (egg backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasTemplate(thisFig)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Note GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See,
eg backend_gtk.py, backend_wx.py and backend_tkagg.py
"""
def draw(self):
"""
Draw the figure using the renderer
"""
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerTemplate
| agpl-3.0 |
lucfra/RFHO | rfho/datasets.py | 1 | 47300 | """
This module contains utility functions to process and load various datasets. Most of the datasets are public,
but are not included in the package; MNIST dataset will be automatically downloaded.
There are also some classes to represent datasets. `ExampleVisiting` is a helper class that implements
the stochastic sampling of data and is optimized to work with `Reverse/ForwardHyperGradient` (it has helper functions
to create training and validation `feed_dict` suppliers).
"""
from collections import OrderedDict
import numpy as np
from functools import reduce
import tensorflow as tf
from tensorflow.examples.tutorials.mnist.input_data import read_data_sets
import os
from rfho.utils import as_list, np_normalize_data, merge_dicts
import sys
try:
import pandas as pd
except ImportError:
pd = None
print(sys.exc_info())
print('pandas not found. Some load function might not work')
try:
import scipy.io as scio
from scipy import linalg
import scipy.sparse as sc_sp
import scipy as sp
SPARSE_SCIPY_MATRICES = (sc_sp.csr.csr_matrix, sc_sp.coo.coo_matrix)
except ImportError:
scio, linalg, sp, sc_sp = None, None, None, None
SPARSE_SCIPY_MATRICES = ()
print(sys.exc_info())
print('scipy not found. Some load function might not work')
try:
import sklearn.datasets as sk_dt
from sklearn.utils import shuffle as sk_shuffle
except ImportError:
sk_dt, sk_shuffle = None, None
print('sklearn not found. Some load function might not work')
try:
import intervaltree as it
except ImportError:
it = None
print(sys.exc_info())
print('intervaltree not found. WindowedData will not work. (You can get intervaltree with pip!)')
import _pickle as cpickle
from_env = os.getenv('RFHO_DATA_FOLDER')
if from_env:
DATA_FOLDER = from_env
# print('Congratulations, RFHO_DATA_FOLDER found!')
else:
print('Environment variable RFHO_DATA_FOLDER not found. Variables HELP_WIN and HELP_UBUNTU contain info.')
DATA_FOLDER = os.getcwd()
_COMMON_BEGIN = "You can set environment variable RFHO_DATA_FOLDER to" \
"specify root folder in which you store various datasets. \n"
_COMMON_END = """\n
You can also skip this step... \n
In this case all load_* methods take a FOLDER path as first argument. \n
Bye."""
HELP_UBUNTU = _COMMON_BEGIN + """
Bash command is: export RFHO_DATA_FOLDER='absolute/path/to/dataset/folder' \n
Remember: to make the variable permanent, add the export command to /etc/bash.bashrc
(or to your ~/.bashrc).
""" + _COMMON_END
HELP_WIN = _COMMON_BEGIN + """
Cmd command is: set RFHO_DATA_FOLDER=absolute/path/to/dataset/folder (for one session). \n
To set it permanently use setx instead of set (you may need to restart the system).
""" + _COMMON_END
print('Data folder is', DATA_FOLDER)
# kind of private
TIMIT_DIR = os.path.join(DATA_FOLDER, 'timit4python')
XRMB_DIR = os.path.join(DATA_FOLDER, 'XRMB')
IROS15_BASE_FOLDER = os.path.join(DATA_FOLDER, os.path.join('dls_collaboration', 'Learning'))
# easy to find!
IRIS_TRAINING = os.path.join(DATA_FOLDER, 'iris', "training.csv")
IRIS_TEST = os.path.join(DATA_FOLDER, 'iris', "test.csv")
MNIST_DIR = os.path.join(DATA_FOLDER, "mnist_data")
CALTECH101_30_DIR = os.path.join(DATA_FOLDER, "caltech101-30")
CALTECH101_DIR = os.path.join(DATA_FOLDER, "caltech")
CENSUS_TRAIN = os.path.join(DATA_FOLDER, 'census', "train.csv")
CENSUS_TEST = os.path.join(DATA_FOLDER, 'census', "test.csv")
CIFAR10_DIR = os.path.join(DATA_FOLDER, "CIFAR-10")
CIFAR100_DIR = os.path.join(DATA_FOLDER, "CIFAR-100")
REALSIM = os.path.join(DATA_FOLDER, "realsim")
# scikit learn datasets
SCIKIT_LEARN_DATA = os.path.join(DATA_FOLDER, 'scikit_learn_data')
class Datasets:
"""
Simple object for standard datasets. Has the field `train` `validation` and `test` and support indexing
"""
def __init__(self, train=None, validation=None, test=None):
self.train = train
self.validation = validation
self.test = test
self._lst = [train, validation, test]
def setting(self):
return {k: v.setting() if hasattr(v, 'setting') else None for k, v in vars(self).items()}
def __getitem__(self, item):
return self._lst[item]
def __len__(self):
return len([_ for _ in self._lst if _ is not None])
@staticmethod
def from_list(list_of_datasets):
"""
Generates a `Datasets` object from a list.
        :param list_of_datasets: list containing from one to three datasets
:return:
"""
train, valid, test = None, None, None
train = list_of_datasets[0]
if len(list_of_datasets) > 3:
            print('There are more than 3 Datasets here...')
return list_of_datasets
if len(list_of_datasets) > 1:
test = list_of_datasets[-1]
if len(list_of_datasets) == 3:
valid = list_of_datasets[1]
return Datasets(train, valid, test)
@staticmethod
def stack(*datasets_s):
"""
Stack some datasets calling stack for each dataset.
:param datasets_s:
:return: a new dataset
"""
return Datasets.from_list([Dataset.stack(*[d[k] for d in datasets_s if d[k] is not None])
for k in range(3)])
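# Minimal usage sketch for `Datasets` (names are illustrative; `some_dataset`
# stands for any `Dataset` instance as defined below):
#
#   train, valid, test = redivide_data([some_dataset], [.6, .2])
#   splits = Datasets.from_list([train, valid, test])
#   print(splits.train.num_examples, len(splits))   # len(splits) == 3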
def _maybe_cast_to_scalar(what):
return what[0] if len(what) == 1 else what
def convert_sparse_matrix_to_sparse_tensor(X):
if isinstance(X, sc_sp.csr.csr_matrix):
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
else:
coo, indices = X, [X.row, X.col]
# data = np.array(coo.data, dtype=)
return tf.SparseTensor(indices, tf.constant(coo.data, dtype=tf.float32), coo.shape)
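# Sketch (assumes scipy is available and a TF1-style graph/session is used):
#
#   _sparse = sc_sp.csr_matrix(np.eye(3, dtype=np.float32))
#   _sparse_tensor = convert_sparse_matrix_to_sparse_tensor(_sparse)   # -> tf.SparseTensor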
class Dataset:
"""
Class for managing a single dataset, includes data and target fields and has some utility functions.
    It also allows converting the dataset into tensors and storing additional information, both on a
    per-example basis and as general info.
"""
def __init__(self, data, target, sample_info=None, info=None):
"""
:param data: Numpy array containing data
:param target: Numpy array containing targets
:param sample_info: either an array of dicts or a single dict, in which case it is cast to array of
dicts.
:param info: (optional) dictionary with further info about the dataset
"""
self._tensor_mode = False
self._data = data
self._target = target
if sample_info is None:
sample_info = {}
self.sample_info = np.array([sample_info] * self.num_examples) \
if isinstance(sample_info, dict) else sample_info
assert self.num_examples == len(self.sample_info)
assert self.num_examples == self._shape(self._target)[0]
self.info = info or {}
def _shape(self, what):
return what.get_shape().as_list() if self._tensor_mode else what.shape
def setting(self):
"""
for save setting purposes, does not save the actual data
:return:
"""
return {
'num_examples': self.num_examples,
'dim_data': self.dim_data,
'dim_target': self.dim_target,
'info': self.info
}
@property
def data(self):
return self._data
@property
def target(self):
return self._target
@property
def num_examples(self):
"""
:return: Number of examples in this dataset
"""
return self._shape(self.data)[0]
@property
def dim_data(self):
"""
:return: The data dimensionality as an integer, if input are vectors, or a tuple in the general case
"""
return _maybe_cast_to_scalar(self._shape(self.data)[1:])
@property
def dim_target(self):
"""
:return: The target dimensionality as an integer, if targets are vectors, or a tuple in the general case
"""
shape = self._shape(self.target)
return 1 if len(shape) == 1 else _maybe_cast_to_scalar(shape[1:])
def convert_to_tensor(self, keep_sparse=True):
matrices = ['_data', '_target']
for att in matrices:
if keep_sparse and isinstance(self.__getattribute__(att), SPARSE_SCIPY_MATRICES):
self.__setattr__(att, convert_sparse_matrix_to_sparse_tensor(self.__getattribute__(att)))
else:
self.__setattr__(att, tf.convert_to_tensor(self.__getattribute__(att), dtype=tf.float32))
self._tensor_mode = True
def create_supplier(self, x, y, other_feeds=None):
"""
Return a standard feed dictionary for this dataset.
:param x: placeholder for data
:param y: placeholder for target
:param other_feeds: optional other feeds
:return: a callable.
"""
if not other_feeds: other_feeds = {}
# noinspection PyUnusedLocal
def _supplier(step=None):
"""
:param step: unused, just for making it compatible with `HG` and `Saver`
:return: the feed dictionary
"""
            if isinstance(self.data, WindowedData):
                # materialize the windowed view once so the feed contains a plain numpy array
                return {**{x: self.data.generate_all(), y: self.target}, **other_feeds}
            return {**{x: self.data, y: self.target}, **other_feeds}
return _supplier
@staticmethod
def stack(*datasets):
"""
        Assuming that the datasets have the same structure, stacks data and targets
:param datasets:
:return: stacked dataset
"""
return Dataset(data=vstack([d.data for d in datasets]),
target=stack_or_concat([d.target for d in datasets]),
sample_info=stack_or_concat([d.sample_info for d in datasets]),
info={k: [d.info.get(k, None) for d in datasets]
for k in merge_dicts(*[d.info for d in datasets])})
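# Minimal usage sketch for `Dataset` (random data; leading-underscore names are
# illustrative):
#
#   _d = Dataset(data=np.random.randn(100, 5), target=np.random.randn(100, 2),
#                info={'origin': 'synthetic'})
#   _x = tf.placeholder(tf.float32, shape=(None, _d.dim_data))
#   _y = tf.placeholder(tf.float32, shape=(None, _d.dim_target))
#   _full_batch_feed = _d.create_supplier(_x, _y)()   # feed_dict with all examples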
def to_one_hot_enc(seq, dimension=None):
da_max = dimension or np.max(seq) + 1
def create_and_set(_p):
_tmp = np.zeros(da_max)
_tmp[_p] = 1
return _tmp
return np.array([create_and_set(_v) for _v in seq])
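# Example: to_one_hot_enc([0, 2, 1]) returns (as a float array)
#   [[1, 0, 0],
#    [0, 0, 1],
#    [0, 1, 0]]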
def load_census():
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
df_train = pd.read_csv(CENSUS_TRAIN, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(CENSUS_TEST, names=COLUMNS, skipinitialspace=True, skiprows=1)
LABEL_COLUMN = "label"
df_train[LABEL_COLUMN] = (df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
    df_test[LABEL_COLUMN] = (df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
    # return the loaded frames so callers can use them (the original body built them but returned nothing)
    return df_train, df_test
def load_iris(partitions_proportions=None, classes=3):
"""Loads Iris dataset divided as training and test set (by default)"""
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
tr_set = training_set.data
tr_targets = to_one_hot_enc(training_set.target)
tr_dst = Dataset(data=tr_set, target=tr_targets)
tst_set = test_set.data
tst_targets = to_one_hot_enc(test_set.target)
tst_dst = Dataset(data=tst_set, target=tst_targets)
if partitions_proportions:
if classes == 2:
# noinspection PyUnusedLocal
def filter_class(x, y, info, i):
return np.argmax(y) != 0 # discard first class
filter_list = [filter_class]
# noinspection PyUnusedLocal
def project_map(x, y, info, i):
return x, y[1:], info
else:
filter_list, project_map = (None, None)
res = redivide_data([tr_dst, tst_dst], partitions_proportions, filters=filter_list, maps=project_map)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return Datasets(train=tr_dst, test=tst_dst, validation=None)
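# Sketch (assumes the iris CSV files are available under DATA_FOLDER/iris):
#
#   iris = load_iris()
#   print(iris.train.num_examples, iris.train.dim_target)   # e.g. 120, 3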
def stack_or_concat(list_of_arrays):
    func = np.concatenate if list_of_arrays[0].ndim == 1 else np.vstack
    return func(list_of_arrays)
def vstack(lst):
"""
Vstack that considers sparse matrices
:param lst:
:return:
"""
return sp.vstack(lst) if sp and isinstance(lst[0], sp.sparse.csr.csr_matrix) else np.vstack(lst)
def redivide_data(datasets, partition_proportions=None, shuffle=False, filters=None, maps=None, balance_classes=False):
"""
    Function that redivides datasets. Can also be used to shuffle, filter, or map examples.
    :param datasets: original datasets, instances of class Dataset (works with get_data and get_targets for
    compatibility with mnist datasets)
:param partition_proportions: (optional, default None) list of fractions that can either sum up to 1 or less
    than one, in which case one additional partition is created with proportion 1 - sum(partition_proportions).
    If None, it will retain the same proportion of samples found in datasets.
:param shuffle: (optional, default False) if True shuffles the examples
:param filters: (optional, default None) filter or list of filters: functions with signature
(data, target, index) -> boolean (accept or reject the sample)
:param maps: (optional, default None) map or list of maps: functions with signature
(data, target, index) -> (new_data, new_target) (maps the old sample to a new one, possibly also to more
than one sample, for data augmentation)
    :return: a list of datasets of length equal to the number of (possibly augmented) partition proportions
"""
all_data = vstack([get_data(d) for d in datasets])
all_labels = stack_or_concat([get_targets(d) for d in datasets])
all_infos = np.concatenate([d.sample_info for d in datasets])
N = all_data.shape[0]
if partition_proportions: # argument check
partition_proportions = list([partition_proportions] if isinstance(partition_proportions, float)
else partition_proportions)
sum_proportions = sum(partition_proportions)
        assert sum_proportions <= 1, "partition proportions must sum up to at most one: %f" % sum_proportions
if sum_proportions < 1.: partition_proportions += [1. - sum_proportions]
else:
partition_proportions = [1. * get_data(d).shape[0] / N for d in datasets]
if shuffle:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
# if sk_shuffle: # TODO this does not work!!! find a way to shuffle these matrices while
# keeping compatibility with tensorflow!
# all_data, all_labels, all_infos = sk_shuffle(all_data, all_labels, all_infos)
# else:
permutation = np.arange(all_data.shape[0])
np.random.shuffle(permutation)
all_data = all_data[permutation]
all_labels = np.array(all_labels[permutation])
all_infos = np.array(all_infos[permutation])
if filters:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
filters = as_list(filters)
data_triple = [(x, y, d) for x, y, d in zip(all_data, all_labels, all_infos)]
for fiat in filters:
data_triple = [xy for i, xy in enumerate(data_triple) if fiat(xy[0], xy[1], xy[2], i)]
all_data = np.vstack([e[0] for e in data_triple])
all_labels = np.vstack([e[1] for e in data_triple])
all_infos = np.vstack([e[2] for e in data_triple])
if maps:
if sp and isinstance(all_data, sp.sparse.csr.csr_matrix): raise NotImplementedError()
maps = as_list(maps)
data_triple = [(x, y, d) for x, y, d in zip(all_data, all_labels, all_infos)]
for _map in maps:
data_triple = [_map(xy[0], xy[1], xy[2], i) for i, xy in enumerate(data_triple)]
all_data = np.vstack([e[0] for e in data_triple])
all_labels = np.vstack([e[1] for e in data_triple])
all_infos = np.vstack([e[2] for e in data_triple])
N = all_data.shape[0]
assert N == all_labels.shape[0]
calculated_partitions = reduce(
lambda v1, v2: v1 + [sum(v1) + v2],
[int(N * prp) for prp in partition_proportions],
[0]
)
calculated_partitions[-1] = N
    print('datasets.redivide_data: computed partition numbers -',
          calculated_partitions, 'len all', N, end=' ')
new_general_info_dict = {}
for data in datasets:
new_general_info_dict = {**new_general_info_dict, **data.info}
if balance_classes:
new_datasets = []
forbidden_indices = np.empty(0, dtype=np.int64)
for d1, d2 in zip(calculated_partitions[:-1], calculated_partitions[1:-1]):
indices = np.array(get_indices_balanced_classes(d2 - d1, all_labels, forbidden_indices))
dataset = Dataset(data=all_data[indices], target=all_labels[indices],
sample_info=all_infos[indices],
info=new_general_info_dict)
new_datasets.append(dataset)
forbidden_indices = np.append(forbidden_indices, indices)
test_if_balanced(dataset)
remaining_indices = np.array(list(set(list(range(N))) - set(forbidden_indices)))
new_datasets.append(Dataset(data=all_data[remaining_indices], target=all_labels[remaining_indices],
sample_info=all_infos[remaining_indices],
info=new_general_info_dict))
else:
new_datasets = [
Dataset(data=all_data[d1:d2], target=all_labels[d1:d2], sample_info=all_infos[d1:d2],
info=new_general_info_dict)
for d1, d2 in zip(calculated_partitions, calculated_partitions[1:])
]
print('DONE')
return new_datasets
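# Sketch: shuffle and split a dataset 70/20/10 (proportions summing to less than
# one get a final partition with the remainder); `some_dataset` is illustrative.
#
#   _train, _val, _test = redivide_data([some_dataset], [.7, .2], shuffle=True)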
def get_indices_balanced_classes(n_examples, labels, forbidden_indices):
N = len(labels)
n_classes = len(labels[0])
indices = []
current_class = 0
for i in range(n_examples):
        index = np.random.randint(0, N)  # np.random.random_integers is deprecated; randint(0, N) is equivalent
        while index in indices or index in forbidden_indices or np.argmax(labels[index]) != current_class:
            index = np.random.randint(0, N)
indices.append(index)
current_class = (current_class + 1) % n_classes
return indices
def test_if_balanced(dataset):
labels = dataset.target
n_classes = len(labels[0])
class_counter = [0] * n_classes
for l in labels:
class_counter[np.argmax(l)] += 1
    print('examples by class: ', class_counter)
def load_20newsgroup_vectorized(folder=SCIKIT_LEARN_DATA, one_hot=True, partitions_proportions=None,
shuffle=False, binary_problem=False, as_tensor=True, minus_value=-1.):
data_train = sk_dt.fetch_20newsgroups_vectorized(data_home=folder, subset='train')
data_test = sk_dt.fetch_20newsgroups_vectorized(data_home=folder, subset='test')
X_train = data_train.data
X_test = data_test.data
y_train = data_train.target
y_test = data_test.target
if binary_problem:
y_train[data_train.target < 10] = minus_value
y_train[data_train.target >= 10] = 1.
y_test[data_test.target < 10] = minus_value
y_test[data_test.target >= 10] = 1.
if one_hot:
y_train = to_one_hot_enc(y_train)
y_test = to_one_hot_enc(y_test)
# if shuffle and sk_shuffle:
# xtr = X_train.tocoo()
# xts = X_test.tocoo()
d_train = Dataset(data=X_train,
target=y_train, info={'target names': data_train.target_names})
d_test = Dataset(data=X_test,
target=y_test, info={'target names': data_train.target_names})
res = [d_train, d_test]
if partitions_proportions:
res = redivide_data([d_train, d_test], partition_proportions=partitions_proportions, shuffle=False)
if as_tensor: [dat.convert_to_tensor() for dat in res]
return Datasets.from_list(res)
def load_realsim(folder=REALSIM, one_hot=True, partitions_proportions=None, shuffle=False, as_tensor=True):
X, y = sk_dt.load_svmlight_file(folder + "/real-sim")
y = np.array([int(yy) for yy in y])
if one_hot:
y = to_one_hot_enc(y)
res = [Dataset(data=X, target=y)]
if partitions_proportions:
res = redivide_data(res, shuffle=shuffle, partition_proportions=partitions_proportions)
res = Datasets.from_list(res)
if as_tensor: [dat.convert_to_tensor() for dat in res]
return res
# noinspection PyPep8Naming
def load_XRMB(folder=XRMB_DIR, half_window=2, max_speakers=100, only_independent=False, normalize_single_speaker=False):
"""
Loads XRMB data.
:param max_speakers:
:param folder: path for root directory.
:param half_window: half window size for the data.
    :param only_independent: if True returns only the speaker-independent data; if False the fields of the
    returned `Datasets` are lists of `Dataset` (speaker-independent data first, then one entry per speaker).
    :param normalize_single_speaker: if True normalizes each speaker dataset independently
    :return: A Datasets instance containing speaker-independent data for training, validation and test,
    or lists of Dataset as described above when only_independent is False.
"""
prefix = folder + "/xrbm_spk_"
set_types = ['train', 'val', 'test']
def load_speaker(speaker_number, set_type):
assert set_type in set_types
files = (prefix + str(speaker_number).zfill(3) + "_%s%s.csv" % (set_type, data_type)
for data_type in ('audio', 'motor', 'sentences'))
arrays = [pd.read_csv(fl, header=None).values for fl in files]
        return arrays[0], arrays[1], arrays[2] - 1  # sentence bounds follow MATLAB (1-based) conventions
def load_all_in(_range=range(1)):
datasets = {n: [] for n in set_types}
m, mo, sd, sto = None, None, None, None
k = 0
for set_type in set_types:
for k in _range:
try:
general_info_dict = {'speaker': k, 'original set': set_type}
data, targets, sentence_bounds = load_speaker(k, set_type)
if normalize_single_speaker and k != 0: # with k = 0 use mean and sd from training set
data, m_sd, sd_sd = np_normalize_data(data, return_mean_and_sd=True)
targets, mo_sd, sto_sd = np_normalize_data(targets, return_mean_and_sd=True)
general_info_dict['normalizing stats'] = (m_sd, sd_sd, mo_sd, sto_sd)
else:
data, m, sd = np_normalize_data(data, m, sd, return_mean_and_sd=True)
targets, mo, sto = np_normalize_data(targets, mo, sto, return_mean_and_sd=True)
general_info_dict['normalizing stats'] = (m, sd, mo, sto)
data = WindowedData(data, sentence_bounds, window=half_window, process_all=True)
datasets[set_type].append(Dataset(data, targets,
sample_info={'speaker': k} if k != 0 else None,
info=general_info_dict))
                except (OSError, FileNotFoundError):  # `except A or B` would only ever catch A
k -= 1
break
print('loaded %d speakers for %s' % (k, set_type))
return datasets
if not only_independent:
res = load_all_in(range(0, max_speakers))
for _set_type in set_types: # sample-wise speaker info to the general datasets
            res[_set_type][0].sample_info = np.concatenate([  # Dataset stores per-example info in `sample_info`
np.array([{'speaker': k + 1}] * ds.num_examples)
for k, ds in enumerate(res[_set_type][1:])
])
return Datasets(train=res['train'], validation=res['val'], test=res['test'])
else:
res = load_all_in()
return Datasets(train=res['train'][0], validation=res['val'][0], test=res['test'][0])
def load_timit_for_joint_training(folder, small=False, one_hot=True, only_gender=False):
"""
:param folder: source folder...
:param small: if `True` loads a smaller version of the dataset
:param one_hot: whether to use one hot encoding for output
:return: A list of `Datasets` where the first one is for the speaker
dependent net and the subsequent are for group dependent nets.
The first dataset should include validation and test data,
while for the others (at the moment) is not needed
"""
# # example
# X, Y = np.array(), np.array()
# group_id = 0
# gender = 'M'
# train = Dataset(X, Y, general_info_dict={'group': group_id, 'gender': gender})
# datasets = Datasets(train=train)
if small:
set_names = ['train_small', 'validation_small', 'coretest_small']
else:
set_names = ['train', 'validation', 'coretest']
Xall = {}
Yall = {}
datasets = [None]
for gender in ['F', 'M']:
_temp_gender = []
for dr in range(1, 9):
sets = []
for s in set_names:
# Loading data
fname = '{}_DR{}_{}.npy'.format(s, dr, gender)
data = np.load(os.path.join(folder, fname))
# Creating dataset
X = data[:, :-1]
Y = data[:, -1]
if one_hot:
Y = to_one_hot_enc(np.array(Y, dtype=np.int32), dimension=183)
info = {'group': dr, 'gender': gender}
sets.append(Dataset(X, Y, info=info))
# Stacking data for full dataset
Xall[s] = np.vstack((Xall[s], X)) if s in Xall else X
if one_hot:
Yall[s] = np.vstack((Yall[s], Y)) if s in Yall else Y
else:
Yall[s] = np.hstack((Yall[s], Y)) if s in Yall else Y
ds = Datasets(train=sets[0], validation=sets[1], test=sets[2])
if not only_gender:
datasets.append(ds)
else:
_temp_gender.append(ds)
if only_gender:
datasets.append(Datasets.stack(*_temp_gender))
# Building full dataset
# sets = []
# for s in set_names:
# sets.append(Dataset(Xall[s], Yall[s]))
# ds = Datasets(train=sets[0], validation=sets[1], test=sets[2])
# datasets[0] = ds
datasets[0] = Datasets.stack(*datasets[1:])
return datasets
# noinspection PyUnusedLocal
def load_timit(folder=TIMIT_DIR, only_primary=False, filters=None, maps=None, small=False, context=None,
fake=False, process_all=False):
def load_timit_sentence_bound():
def sentence_bound_reader(name):
bnd = pd.read_csv(folder + '/timit_%sSentenceBound.csv' % name, header=None).values
return bnd - 1
return [sentence_bound_reader(n) for n in ['train', 'val', 'test']]
folder = folder or TIMIT_DIR
if isinstance(process_all, bool):
process_all = [process_all] * 3
if fake:
def generate_dataset(secondary=False):
target = np.random.randn(2000, 183)
if secondary:
target = np.hstack([target, np.random.randn(2000, 300)])
return np.random.randn(2000, 123), target
training_data, training_target = generate_dataset(not only_primary)
validation_data, validation_target = generate_dataset()
test_data, test_target = generate_dataset()
training_info_dict = None
else:
split_number = '00' if small else ''
training_target = pd.read_csv(folder + '/timit_trainTargets%s.csv' % split_number, header=None).values
training_data = pd.read_csv(folder + '/timit-preproc_traindata_norm_noctx%s.csv' %
split_number, header=None).values
training_info_dict = {'dim_primary_target': training_target.shape[1]}
print('loaded primary training data')
if not only_primary:
training_secondary_target = pd.read_csv(folder + '/timit_trainTargetsPE%s.csv'
% split_number, header=None).values
training_target = np.hstack([training_target, training_secondary_target])
training_info_dict['dim_secondary_target'] = training_secondary_target.shape[1]
print('loaded secondary task targets')
validation_data = pd.read_csv(folder + '/timit-preproc_valdata_norm_noctx%s.csv'
% split_number, header=None).values
validation_target = pd.read_csv(folder + '/timit_valTargets%s.csv' % split_number, header=None).values
print('loaded validation data')
test_data = pd.read_csv(folder + '/timit-preproc_testdata_norm_noctx.csv', header=None).values
test_target = pd.read_csv(folder + '/timit_testTargets.csv', header=None).values
print('loaded test data')
if context:
sbs = load_timit_sentence_bound()
training_data, validation_data, test_data = (WindowedData(d, s, context, process_all=pa) for d, s, pa
in zip([training_data, validation_data, test_data],
sbs, process_all))
test_dataset = Dataset(data=test_data, target=test_target)
validation_dataset = Dataset(data=validation_data, target=validation_target)
training_dataset = Dataset(data=training_data, target=training_target, info=training_info_dict)
res = Datasets(train=training_dataset, validation=validation_dataset, test=test_dataset)
return res
def load_mnist(folder=None, one_hot=True, partitions=None, filters=None, maps=None, shuffle=False):
if not folder: folder = MNIST_DIR
datasets = read_data_sets(folder, one_hot=one_hot)
train = Dataset(datasets.train.images, datasets.train.labels)
validation = Dataset(datasets.validation.images, datasets.validation.labels)
test = Dataset(datasets.test.images, datasets.test.labels)
res = [train, validation, test]
if partitions:
res = redivide_data(res, partition_proportions=partitions, filters=filters, maps=maps, shuffle=shuffle)
res += [None] * (3 - len(res))
return Datasets.from_list(res)
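# Sketch (downloads MNIST into MNIST_DIR on first use):
#
#   mnist = load_mnist(partitions=(.8, .1), shuffle=True)
#   print(mnist.train.dim_data, mnist.train.dim_target)   # 784, 10 with the default flat/one-hot encoding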
def load_caltech101_30(folder=CALTECH101_30_DIR, tiny_problem=False):
caltech = scio.loadmat(folder + '/caltech101-30.matlab')
k_train, k_test = caltech['Ktrain'], caltech['Ktest']
label_tr, label_te = caltech['tr_label'], caltech['te_label']
file_tr, file_te = caltech['tr_files'], caltech['te_files']
if tiny_problem:
pattern_step = 5
fraction_limit = 0.2
k_train = k_train[:int(len(label_tr) * fraction_limit):pattern_step,
:int(len(label_tr) * fraction_limit):pattern_step]
label_tr = label_tr[:int(len(label_tr) * fraction_limit):pattern_step]
U, s, Vh = linalg.svd(k_train)
S_sqrt = linalg.diagsvd(s ** 0.5, len(s), len(s))
X = np.dot(U, S_sqrt) # examples in rows
train_x, val_x, test_x = X[0:len(X):3, :], X[1:len(X):3, :], X[2:len(X):3, :]
label_tr_enc = to_one_hot_enc(np.array(label_tr) - 1)
train_y, val_y, test_y = label_tr_enc[0:len(X):3, :], label_tr_enc[1:len(X):3, :], label_tr_enc[2:len(X):3, :]
train_file, val_file, test_file = file_tr[0:len(X):3], file_tr[1:len(X):3], file_tr[2:len(X):3]
test_dataset = Dataset(data=test_x, target=test_y, info={'files': test_file})
validation_dataset = Dataset(data=val_x, target=val_y, info={'files': val_file})
training_dataset = Dataset(data=train_x, target=train_y, info={'files': train_file})
return Datasets(train=training_dataset, validation=validation_dataset, test=test_dataset)
def load_iros15(folder=IROS15_BASE_FOLDER, resolution=15, legs='all', part_proportions=(.7, .2), one_hot=True,
shuffle=True):
resolutions = (5, 11, 15)
legs_names = ('LF', 'LH', 'RF', 'RH')
assert resolution in resolutions
folder += str(resolution)
if legs == 'all': legs = legs_names
base_name_by_leg = lambda leg: os.path.join(folder, 'trainingSet%sx%sFromSensor%s.mat'
% (resolution, resolution, leg))
datasets = {}
for _leg in legs:
dat = scio.loadmat(base_name_by_leg(_leg))
data, target = dat['X'], to_one_hot_enc(dat['Y']) if one_hot else dat['Y']
# maybe pre-processing??? or it is already done? ask...
datasets[_leg] = Datasets.from_list(
redivide_data([Dataset(data, target, info={'leg': _leg})],
partition_proportions=part_proportions, shuffle=shuffle))
return datasets
def load_caltech101(folder=CALTECH101_DIR, one_hot=True, partitions=None, filters=None, maps=None):
path = folder + "/caltech101.pickle"
with open(path, "rb") as input_file:
X, target_name, files = cpickle.load(input_file)
dict_name_ID = {}
i = 0
list_of_targets = sorted(list(set(target_name)))
for k in list_of_targets:
dict_name_ID[k] = i
i += 1
dict_ID_name = {v: k for k, v in dict_name_ID.items()}
Y = []
for name_y in target_name:
Y.append(dict_name_ID[name_y])
if one_hot:
Y = to_one_hot_enc(Y)
dataset = Dataset(data=X, target=Y, info={'dict_name_ID': dict_name_ID, 'dict_ID_name': dict_ID_name},
sample_info=[{'target_name': t, 'files': f} for t, f in zip(target_name, files)])
if partitions:
res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def load_cifar10(folder=CIFAR10_DIR, one_hot=True, partitions=None, filters=None, maps=None, balance_classes=False):
path = folder + "/cifar-10.pickle"
with open(path, "rb") as input_file:
X, target_name, files = cpickle.load(input_file)
X = np.array(X)
dict_name_ID = {}
i = 0
list_of_targets = sorted(list(set(target_name)))
for k in list_of_targets:
dict_name_ID[k] = i
i += 1
dict_ID_name = {v: k for k, v in dict_name_ID.items()}
Y = []
for name_y in target_name:
Y.append(dict_name_ID[name_y])
if one_hot:
Y = to_one_hot_enc(Y)
dataset = Dataset(data=X, target=Y, info={'dict_name_ID': dict_name_ID, 'dict_ID_name': dict_ID_name},
sample_info=[{'target_name': t, 'files': f} for t, f in zip(target_name, files)])
if partitions:
        res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True,
                            balance_classes=balance_classes)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def load_cifar100(folder=CIFAR100_DIR, one_hot=True, partitions=None, filters=None, maps=None):
path = folder + "/cifar-100.pickle"
with open(path, "rb") as input_file:
X, target_ID_fine, target_ID_coarse, fine_ID_corr, coarse_ID_corr, files = cpickle.load(input_file)
    X = np.array(X)
target_ID_fine = target_ID_fine[:len(X)]
target_ID_coarse = target_ID_coarse[:len(X)]
fine_ID_corr = {v: k for v, k in zip(range(len(fine_ID_corr)), fine_ID_corr)}
coarse_ID_corr = {v: k for v, k in zip(range(len(coarse_ID_corr)), coarse_ID_corr)}
fine_label_corr = {v: k for k, v in fine_ID_corr.items()}
coarse_label_corr = {v: k for k, v in coarse_ID_corr.items()}
Y = []
for name_y in target_ID_fine:
Y.append(name_y)
Y = np.array(Y)
if one_hot:
Y = to_one_hot_enc(Y)
superY = []
for name_y in target_ID_coarse:
superY.append(name_y)
superY = np.array(superY)
if one_hot:
superY = to_one_hot_enc(superY)
print(len(X))
print(len(Y))
dataset = Dataset(data=X, target=Y,
info={'dict_name_ID_fine': fine_label_corr, 'dict_name_ID_coarse': coarse_label_corr,
'dict_ID_name_fine': fine_ID_corr, 'dict_ID_name_coarse': coarse_ID_corr},
sample_info=[{'Y_coarse': yc, 'files': f} for yc, f in zip(superY, files)])
if partitions:
res = redivide_data([dataset], partitions, filters=filters, maps=maps, shuffle=True)
res += [None] * (3 - len(res))
return Datasets(train=res[0], validation=res[1], test=res[2])
return dataset
def generate_multiclass_dataset(n_samples=100, n_features=10,
n_informative=5, n_redundant=3, n_repeated=2,
n_classes=2, n_clusters_per_class=2,
weights=None, flip_y=0.01, class_sep=1.0,
hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None, hot_encoded=True, partitions_proportions=None,
negative_labels=-1.):
X, y = sk_dt.make_classification(n_samples=n_samples, n_features=n_features,
n_informative=n_informative, n_redundant=n_redundant, n_repeated=n_repeated,
n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,
weights=weights, flip_y=flip_y, class_sep=class_sep,
hypercube=hypercube, shift=shift, scale=scale,
shuffle=True, random_state=random_state)
if hot_encoded:
y = to_one_hot_enc(y)
else:
y[y == 0] = negative_labels
res = Dataset(data=np.array(X, dtype=np.float32), target=np.array(y, dtype=np.float32),
info={'n_informative': n_informative, 'n_redundant': n_redundant,
'n_repeated': n_repeated,
'n_classes': n_classes, 'n_clusters_per_class': n_clusters_per_class,
'weights': weights, 'flip_y': flip_y, 'class_sep': class_sep,
'hypercube': hypercube, 'shift': shift, 'scale': scale,
'shuffle': True, 'random_state': random_state})
np.random.seed(random_state)
if partitions_proportions:
res = redivide_data([res], shuffle=shuffle, partition_proportions=partitions_proportions)
res = Datasets.from_list(res)
return res
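# Sketch: a small synthetic classification problem split into train/validation/test.
#
#   synth = generate_multiclass_dataset(n_samples=300, n_features=20,
#                                       partitions_proportions=(.5, .3))
#   print(synth.train.num_examples)   # roughly 150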
def get_data(d_set):
if hasattr(d_set, 'images'):
data = d_set.images
elif hasattr(d_set, 'data'):
data = d_set.data
else:
raise ValueError("something wrong with the dataset %s" % d_set)
return data
def get_targets(d_set):
if hasattr(d_set, 'labels'):
return d_set.labels
elif hasattr(d_set, 'target'):
return d_set.target
else:
raise ValueError("something wrong with the dataset %s" % d_set)
#
class ExampleVisiting:
def __init__(self, dataset, batch_size, epochs=None):
"""
        Class for stochastic sampling of data points. It is most useful for feeding examples to the
        training ops of `ReverseHG` or `ForwardHG`. Most notably, if the number of epochs is specified,
        the class keeps track of the examples in each mini-batch, which is important for the backward pass
        of the `ReverseHG` method.
:param dataset: instance of `Dataset` class
:param batch_size:
:param epochs: number of epochs (can be None, in which case examples are
fed continuously)
"""
self.dataset = dataset
self.batch_size = batch_size
self.epochs = epochs
self.T = int(np.ceil(dataset.num_examples / batch_size))
if self.epochs: self.T *= self.epochs
self.training_schedule = None
self.iter_per_epoch = int(dataset.num_examples / batch_size)
def setting(self):
        excluded = ['training_schedule', 'dataset']  # exclude the raw Dataset object; its setting() is added below
dictionary = {k: v for k, v in vars(self).items() if k not in excluded}
if hasattr(self.dataset, 'setting'):
dictionary['dataset'] = self.dataset.setting()
return dictionary
def generate_visiting_scheme(self):
"""
Generates and stores example visiting scheme, as a numpy array of integers.
:return: self
"""
def all_indices_shuffled():
_res = list(range(self.dataset.num_examples))
np.random.shuffle(_res)
return _res
# noinspection PyUnusedLocal
self.training_schedule = np.concatenate([all_indices_shuffled()
for _ in range(self.epochs or 1)])
return self
def create_supplier(self, x, y, other_feeds=None, lambda_feeds=None):
return self.create_feed_dict_supplier(x, y, other_feeds=other_feeds,
lambda_feeds=lambda_feeds)
def create_feed_dict_supplier(self, x, y, other_feeds=None, lambda_feeds=None):
"""
:param x: placeholder for independent variable
:param y: placeholder for dependent variable
        :param lambda_feeds: dictionary mapping placeholders to callables of the mini-batch example indices
:param other_feeds: dictionary of other feeds (e.g. dropout factor, ...) to add to the input output
feed_dict
:return: a function that generates a feed_dict with the right signature for Reverse and Forward HyperGradient
classes
"""
if not lambda_feeds:
lambda_processed_feeds = {}
if not other_feeds:
other_feeds = {}
def _training_supplier(step=None):
nonlocal lambda_processed_feeds, other_feeds
if step >= self.T:
if step % self.T == 0:
if self.epochs:
                        print('WARNING: End of the training scheme reached. '
                              'Generating another scheme.')
self.generate_visiting_scheme()
step %= self.T
if self.training_schedule is None:
# print('visiting scheme not yet generated!')
self.generate_visiting_scheme()
# noinspection PyTypeChecker
nb = self.training_schedule[step * self.batch_size: min(
(step + 1) * self.batch_size, len(self.training_schedule))]
bx = self.dataset.data[nb, :]
by = self.dataset.target[nb, :]
if lambda_feeds:
lambda_processed_feeds = {k: v(nb) for k, v in lambda_feeds.items()}
else:
lambda_processed_feeds = {}
return {**{x: bx, y: by}, **other_feeds, **lambda_processed_feeds}
return _training_supplier
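# Minimal usage sketch for `ExampleVisiting` (placeholder names are illustrative
# and vector-shaped data is assumed; `some_dataset` is any `Dataset` instance):
#
#   _ev = ExampleVisiting(some_dataset, batch_size=32, epochs=5).generate_visiting_scheme()
#   _x = tf.placeholder(tf.float32, shape=(None, some_dataset.dim_data))
#   _y = tf.placeholder(tf.float32, shape=(None, some_dataset.dim_target))
#   _supplier = _ev.create_supplier(_x, _y)
#   # at optimization step t:  sess.run(train_op, feed_dict=_supplier(t))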
def pad(_example, _size): return np.concatenate([_example] * _size)
class WindowedData(object):
def __init__(self, data, row_sentence_bounds, window=5, process_all=False):
"""
Class for managing windowed input data (like TIMIT).
:param data: Numpy matrix. Each row should be an example data
:param row_sentence_bounds: Numpy matrix with bounds for padding. TODO add default NONE
:param window: half-window size
:param process_all: (default False) if True adds context to all data at object initialization.
Otherwise the windowed data is created in runtime.
"""
self.window = window
self.data = data
base_shape = self.data.shape
self.shape = (base_shape[0], (2 * self.window + 1) * base_shape[1])
self.tree = it.IntervalTree([it.Interval(int(e[0]), int(e[1]) + 1) for e in row_sentence_bounds])
if process_all:
print('adding context to all the dataset', end='- ')
self.data = self.generate_all()
print('DONE')
self.process_all = process_all
def generate_all(self):
return self[:]
def __getitem__(self, item): # TODO should be right for all the common use... But better write down a TestCase
if hasattr(self, 'process_all') and self.process_all: # keep attr check!
return self.data[item]
if isinstance(item, int):
return self.get_context(item=item)
if isinstance(item, tuple):
if len(item) == 2:
rows, columns = item
if isinstance(rows, int) and isinstance(columns, int): # TODO check here
# do you want the particular element?
return self.get_context(item=rows)[columns]
else:
raise TypeError('NOT IMPLEMENTED <|>')
if isinstance(rows, slice):
rows = range(*rows.indices(self.shape[0]))
return np.vstack([self.get_context(r) for r in rows])[:, columns]
else:
if isinstance(item, slice):
item = range(*item.indices(self.shape[0]))
return np.vstack([self.get_context(r) for r in item])
def __len__(self):
return self.shape[0]
def get_context(self, item):
interval = list(self.tree[item])[0]
# print(interval)
left, right = interval[0], interval[1]
left_pad = max(self.window + left - item, 0)
        right_pad = max(0, self.window - min(right, len(self) - 1) + item)  # this is to cope with reduced datasets
# print(left, right, item)
# print(left_pad, right_pad)
base = np.concatenate(self.data[item - self.window + left_pad: item + self.window + 1 - right_pad])
if left_pad:
base = np.concatenate([pad(self.data[item], left_pad), base])
if right_pad:
base = np.concatenate([base, pad(self.data[item], right_pad)])
return base
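# Sketch for `WindowedData` (assumes intervaltree is installed; the sentence
# bounds below are illustrative):
#
#   _frames = np.random.randn(10, 3)
#   _bounds = np.array([[0, 4], [5, 9]])   # two "sentences"
#   _wd = WindowedData(_frames, _bounds, window=2)
#   _wd[0].shape                           # (15,) == (2 * 2 + 1) * 3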
#
#
# if __name__ == '__main__':
# # _datasets = load_20newsgroup_feed_vectorized(one_hot=False, binary_problem=True)
# # print(_datasets.train.dim_data)
# # print(_datasets.train.dim_target)
# # mnist = load_mnist(partitions=[0.1, .2], filters=lambda x, y, d, k: True)
# # print(len(_datasets.train))\
#
# load_20newsgroup_vectorized(one_hot=False, shuffle=True, partitions_proportions=(1 / 3, 1 / 3))
#
# mnist = load_mnist(partitions=(.1, .1), shuffle=True)
#
# print(mnist.train.data)
# print(type(mnist.train.data))
#
# # dt = load_20newsgroup_vectorized()
# # print(dt.train.num_examples)
# # print(dt.train.num_examples)
| mit |