repo_name | path | copies | size | content | license
---|---|---|---|---|---|
phoebe-project/phoebe2-docs | development/examples/rotstar_pulsations.py | 4 | 1391 | #!/usr/bin/env python
# coding: utf-8
# Single Star with Pulsations
# ============================
#
# **NOTE: pulsations are currently being tested but not yet supported**
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.html) for more details.
# In[1]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_star()
# Adding Pulsations
# ---------------------
# In[2]:
b.add_feature('pulsation', component='starA', feature='puls01', m=0, l=0)
# In[3]:
b.add_feature('pulsation', component='starA', feature='puls02', m=1, l=1)
# Pulsation Parameters
# -----------------
# Pulsations are defined by a frequency and amplitude
# In[4]:
print b['puls01']
# In[5]:
print b['puls02']
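# As an illustrative aside (not part of the original example): the printed
# parameters could presumably be adjusted through the bundle's set_value
# interface, e.g. something like
#     b.set_value(qualifier='freq', feature='puls01', value=...)
# The qualifier name 'freq' here is an assumption, not confirmed by this example.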
# In[6]:
b.add_dataset('lc', times=np.linspace(0,3,21))
# In[7]:
b.run_compute(distortion_method='rotstar', pbmesh=True)
# In[8]:
b['model'].animate(facecolor='teffs', edgecolor=None)
# In[ ]:
| gpl-3.0 |
tzipperle/mplstyle | examples/custom_colormap.py | 1 | 1358 | import matplotlib.pyplot as plt
import numpy as np
from mplstyle.ewk import PLTewk
def color_map_example():
""" An example of how to use get_cmap."""
ewk_plt.set_style('default')
fig = plt.figure(figsize=(14, 12))
fig.suptitle('Define your own color maps', fontsize=20, fontweight='bold')
fig.subplots_adjust(hspace=0.3, top=0.91)
ax0 = fig.add_subplot(311)
colors = [(21, 71, 157), (53, 117, 227), (210, 224, 249)]
plt.pcolor(np.random.rand(25, 50), cmap=ewk_plt.get_cmap(colors, bit=True))
plt.colorbar()
ax0.set_title('default style')
ax1 = fig.add_subplot(312)
ewk_plt.set_style('ewk_ggplt')
ewk_colors = ewk_plt.get_colors()
colors = [ewk_colors['lightred'], ewk_colors['mdarkred']]
plt.pcolor(np.random.rand(25, 50), cmap=ewk_plt.get_cmap(colors))
plt.colorbar()
ax1.set_title('example style: red color')
ewk_plt.set_style('ewk_ggplt')
ax2 = fig.add_subplot(313)
colors = [(0.4, 0.2, 0.0), (1, 1, 1), (1, 1, 0), (0, 0.3, 0.4)]
position = [0, 0.2, 0.5, 1]
plt.pcolor(np.random.rand(25, 50),
cmap=ewk_plt.get_cmap(colors, position=position))
plt.colorbar()
ax2.set_title('enfo style')
ewk_plt.add_zbild(ax2, x=.82, y=1.02, text='88-xxx-B16')
plt.show()
if __name__ == "__main__":
ewk_plt = PLTewk()
color_map_example()
| gpl-3.0 |
eramirem/astroML | book_figures/chapter3/fig_conditional_probability.py | 3 | 4425 | """
Joint and Conditional Probabilities
-----------------------------------
Figure 3.2.
An example of a two-dimensional probability distribution. The color-coded
panel shows p(x, y). The two panels to the left and below show marginal
distributions in x and y (see eq. 3.8). The three panels to the right show
the conditional probability distributions p(x|y) (see eq. 3.7) for three
different values of y (as marked in the left panel).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import NullFormatter, NullLocator, MultipleLocator
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def banana_distribution(N=10000):
"""This generates random points in a banana shape"""
# create a truncated normal distribution
theta = np.random.normal(0, np.pi / 8, 10000)
theta[theta >= np.pi / 4] /= 2
theta[theta <= -np.pi / 4] /= 2
# define the curve parametrically
r = np.sqrt(1. / abs(np.cos(theta) ** 2 - np.sin(theta) ** 2))
r += np.random.normal(0, 0.08, size=10000)
x = r * np.cos(theta + np.pi / 4)
y = r * np.sin(theta + np.pi / 4)
return (x, y)
#------------------------------------------------------------
# Generate the data and compute the normalized 2D histogram
np.random.seed(1)
x, y = banana_distribution(10000)
Ngrid = 41
grid = np.linspace(0, 2, Ngrid + 1)
H, xbins, ybins = np.histogram2d(x, y, grid)
H /= np.sum(H)
#------------------------------------------------------------
# plot the result
fig = plt.figure(figsize=(5, 2.5))
# define axes
ax_Pxy = plt.axes((0.2, 0.34, 0.27, 0.52))
ax_Px = plt.axes((0.2, 0.14, 0.27, 0.2))
ax_Py = plt.axes((0.1, 0.34, 0.1, 0.52))
ax_cb = plt.axes((0.48, 0.34, 0.01, 0.52))
ax_Px_y = [plt.axes((0.65, 0.62, 0.32, 0.23)),
plt.axes((0.65, 0.38, 0.32, 0.23)),
plt.axes((0.65, 0.14, 0.32, 0.23))]
# set axis label formatters
ax_Px_y[0].xaxis.set_major_formatter(NullFormatter())
ax_Px_y[1].xaxis.set_major_formatter(NullFormatter())
ax_Pxy.xaxis.set_major_formatter(NullFormatter())
ax_Pxy.yaxis.set_major_formatter(NullFormatter())
ax_Px.yaxis.set_major_formatter(NullFormatter())
ax_Py.xaxis.set_major_formatter(NullFormatter())
# draw the joint probability
plt.axes(ax_Pxy)
H *= 1000
plt.imshow(H, interpolation='nearest', origin='lower', aspect='auto',
extent=[0, 2, 0, 2], cmap=plt.cm.binary)
cb = plt.colorbar(cax=ax_cb)
cb.set_label('$p(x, y)$')
plt.text(0, 1.02, r'$\times 10^{-3}$',
transform=ax_cb.transAxes)
# draw p(x) distribution
ax_Px.plot(xbins[1:], H.sum(0), '-k', drawstyle='steps')
# draw p(y) distribution
ax_Py.plot(H.sum(1), ybins[1:], '-k', drawstyle='steps')
# define axis limits
ax_Pxy.set_xlim(0, 2)
ax_Pxy.set_ylim(0, 2)
ax_Px.set_xlim(0, 2)
ax_Py.set_ylim(0, 2)
# label axes
ax_Pxy.set_xlabel('$x$')
ax_Pxy.set_ylabel('$y$')
ax_Px.set_xlabel('$x$')
ax_Px.set_ylabel('$p(x)$')
ax_Px.yaxis.set_label_position('right')
ax_Py.set_ylabel('$y$')
ax_Py.set_xlabel('$p(y)$')
ax_Py.xaxis.set_label_position('top')
# draw marginal probabilities
iy = [3 * Ngrid / 4, Ngrid / 2, Ngrid / 4]
colors = 'rgc'
axis = ax_Pxy.axis()
for i in range(3):
# overplot range on joint probability
ax_Pxy.plot([0, 2, 2, 0],
[ybins[iy[i] + 1], ybins[iy[i] + 1],
ybins[iy[i]], ybins[iy[i]]], c=colors[i], lw=1)
Px_y = H[iy[i]] / H[iy[i]].sum()
ax_Px_y[i].plot(xbins[1:], Px_y, drawstyle='steps', c=colors[i])
ax_Px_y[i].yaxis.set_major_formatter(NullFormatter())
ax_Px_y[i].set_ylabel('$p(x | %.1f)$' % ybins[iy[i]])
ax_Pxy.axis(axis)
ax_Px_y[2].set_xlabel('$x$')
ax_Pxy.set_title('Joint Probability')
ax_Px_y[0].set_title('Conditional Probability')
plt.show()
| bsd-2-clause |
swapnilgt/percPatternDiscovery | rlcs/analysis/__init__.py | 2 | 16233 | import sys
import os
sys.path.append('../utils')
sys.path.append('../src')
import utils as ut
from src import impl as rlcs
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
'''
This module has the code to analyze the data obtained from the LCS
'''
# TODO: Remove this
def groupPatterns(patternDic):
'''
This function takes in a dictionary with the format:
{<fileName1>:[<pattern1>, <pattern2>, ...], <fileName2>:[<pattern3>, <pattern4>,...], ...}
and returns the patterns by grouping them in the form:
{<pattern1>:<count1>, <pattern2>:<count2>, ...}
'''
patternCount = {}
for key, patternList in patternDic.iteritems():
for pattern in patternList:
if pattern in patternCount.keys():
patternCount[pattern] += 1
else:
patternCount[pattern] = 1
return patternCount
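# Illustrative example (not from the original source):
#   groupPatterns({'f1.lab': ['TA KI', 'TA KI'], 'f2.lab': ['TA KI']})
#   returns {'TA KI': 3}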
#TODO: Remove this
def getAccuracy(patternInGT, matches):
'''
Takes in the RLCS output on the ground truth <patternInGT> and on the test phrase <matches>.
Returns the difference in the number of matched positions, along with the counts in the ground truth and in the transcription.
'''
matchSet = set()
gtSet = set()
# Get all the positions in matches and the ground truth in as set
# We might have multiple subsequences starting at the same location
# We take the one which has largest length
# If the length of two patterns starting at the same place is same, we take the one which
# has the maximum number of matched
for tup in matches:
matchSet.add(tup[0])
for tup in patternInGT:
gtSet.add(tup[0])
numOrig = len(gtSet)
numTrans = len(matchSet)
diff = numOrig - numTrans
print 'The positions in the transcribed are:' + str(numTrans)
print 'The positions in the ground truth are:' + str(numOrig)
print 'The difference is:' + str(diff)
return (diff, numOrig, numTrans)
def getClosestPairs(transMatches, scoreMatches, origOnsets, transOnsets, overlapTres):
'''
This method takes the transMatches and scoreMatches which are the list of the patterns found in
the transcription and the ground truth score in the format:
[(<position1>,<pattern1>), (<position2>, <pattern2>), ...]
It also inputs the onset time for both ground truth (origOnsets) and the transcription (transOnsets)
It returns a list of the tuples in which first element is a pattern from the transcription and
the second element is the pattern from the ground truth score. Both the tuples are of the form:
(<position3>, <pattern3>)
Also returns the matches in the ground truth score which had no transcribed pattern associated
with them, in the same format as above.
:Note:
1) Here we assume that the positions mentioned in the tuples of transMatches and scoreMatches start from 1
and the onsets position in the onset arrays start from 0 (handle this)
'''
# The return object ...
closestPairs = []
# The array that stores that closest match positions ...
bestOverlapIndices = set()
# The array that stores the matches in the ground score that do not have any match
noMatchScore = []
# Array to store the lengths of the retrieved patterns which conform to the scoreTres
indexSet = set()
# Iterating ...
for trans in transMatches:
bestOverlap = 0.0
# the start position of the transcription
sTrans = trans[0] - 1
bestIndex = None # In case there is no match.. we check later if the bestIndex is None
# Getting the number of syllables in transcription
ptrTrans = trans[1].strip() # Have spaces at the end
ptrTransLen = len(ptrTrans.split(' '))
for index, orig in enumerate(scoreMatches):
# the start position of the ground truth score
#print 'The index is:' + str(index)
#print 'The orig is:' + str(orig)
sOrig = orig[0] - 1
ptrOrigLen = len(orig[1])
overlapTime = ut.getOverlap((origOnsets[sOrig], origOnsets[sOrig + ptrOrigLen - 1]), (transOnsets[sTrans], transOnsets[sTrans + ptrTransLen - 1]))
percOverlap = overlapTime * 100 / (origOnsets[sOrig + ptrOrigLen - 1] - origOnsets[sOrig])
# checking if the new overlap percentage is greater than the older one
if percOverlap > bestOverlap:
bestOverlap = percOverlap
bestIndex = index
closestPairs.append((trans, bestOverlap))
if bestOverlap >= overlapTres: # Appending only if the overlap is more than the threshold
bestOverlapIndices.add(bestIndex)
for index, val in enumerate(scoreMatches):
if index not in bestOverlapIndices:
noMatchScore.append(val)
return (closestPairs, noMatchScore)
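# A hedged usage sketch (the onset arrays and patterns below are made up, and
# the resulting overlaps depend on ut.getOverlap, so no output is asserted):
#   closest, unmatched = getClosestPairs(
#       transMatches=[(1, 'TA KI TA ')],
#       scoreMatches=[(1, ['TA', 'KI', 'TA'])],
#       origOnsets=[0.0, 0.5, 1.0, 1.5],
#       transOnsets=[0.02, 0.48, 1.01, 1.52],
#       overlapTres=75.0)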
def getRelevantTrans(overlaps, tres = 0.0):
'''
Takes in the overlaps for each of the transcripted substring found and returns the number which have overlap above the tres
Also, returns a list which shows the length of the retrieved patterns in the transcribed data
'''
total = 0
ptrLen = []
for tup in overlaps:
if tup[1] != None and tup[1] >= tres:
total += 1
ptrLen.append(len(tup[0][1].strip().split(' ')))
return total, ptrLen
def getFalseNegativesInTranscription(trans, notFound, origOnsets, transOnsets):
'''
This function takes in the transcribed syllables, the one not found, the onset positions and returns
the list of the tuples where each tuple is of the format (<position>, <pattern>)
'''
falseNegs = []
for tup in notFound:
# The start position of the ground truth pattern..
sOrig = tup[0] - 1
# Getting the number of syllables in the original score pattern
#ptrOrig = tup[1].strip() # Have spaces at the end
#ptrOrigLen = len(ptrOrig.split(' '))
ptrOrigLen = len(tup[1])
# Getting the start and end time of the transcription
sTime = origOnsets[sOrig]
eTime = origOnsets[sOrig + ptrOrigLen - 1]
if sTime >= eTime:
print 'analysis.getFalseNegativesInTranscription:: sTime greater than or equal to eTime:' + str(sTime) + ' and ' + str(eTime)
sys.exit()
# Getting the closest indices for the start and end time in the
sIndex = ut.findClosestIndex(sTime, transOnsets, 0, len(transOnsets) - 1)
eIndex = ut.findClosestIndex(eTime, transOnsets, 0, len(transOnsets) - 1)
falseNegs.append((sIndex + 1, trans[sIndex : eIndex + 1]))
return falseNegs
def getAnalysisBeta(resDir, p = 0.875, scoreTres = 0.75, betaMin = 0.01, betaMax = 0.99, betaStep = 0.05):
"""
Gives the analysis for a range of beta with the other parameters fixed
"""
result = []
if not os.path.isdir(resDir):
print 'The directory mentioned does not exist:' + resDir
sys.exit()
pDir = os.path.join(resDir, 'p_'+ str(p))
if not os.path.isdir(pDir):
print 'The directory mentioned does not exist:' + pDir
sys.exit()
beta = betaMin
while beta <= betaMax:
bDir = os.path.join(pDir, 'beta_' + str(beta))
if not os.path.isdir(bDir):
print 'The directory mentioned does not exist:' + bDir
sys.exit()
data = ut.readPickle(os.path.join(bDir, 'result.pkl'))
found = False
for tup in data:
if abs(tup[0] - scoreTres) < eps:
found = True
print 'The value of scoreTres:' + str(tup[0])
result.append((beta,tup[1]))
break
if found is not True:
print 'Could not find the value for iteration with beta=' + str(beta)
sys.exit()
beta += betaStep
return result
########## New set of functions for analysis of the errors in the transcription ##########
def getIndicesInTrans(origOnset, transOnset, dictPtrIndices):
"""
Inputs the onset locations of ground truth (origOnset), transcribed score (transOnset) and the start and end locations
of the pattern in the ground truth and returns the list of indices for the closest onset positions in the transcription
"""
result = {}
retRes = []
print 'The number of patterns in the GT are:' + str(len(dictPtrIndices))
for i, ons in enumerate(transOnset):
closestInOrig = ut.findClosestIndex(ons, origOnset, 0, len(origOnset)-1)
for key, val in dictPtrIndices.iteritems():
if closestInOrig in val:
if key in result:
result[key].append(i)
else:
result[key] = [i]
for key, val in result.iteritems():
if max(val) - min(val) + 1 != len(val):
print 'Something wrong with the positions found'
print 'key:' + str(key)
print 'val:' + str(val)
retRes.append(val)
return retRes
def getPatternsInTrans(score, indicesList):
"""
This function takes in the score, which is a list of syllables, and the indices, which is a list of lists of
indices into the score. For each list in the indices, it returns the sequence of syllables found, with
the ones that correspond to exact locations in the ground truth marked with a '!'
"""
result = []
for indices in indicesList:
#print indices
seq = score[indices[0]:indices[len(indices)-1]+1]
result.append(seq)
return result
def populateDictPtrIndices(ptrStartIndices, l):
"""
Takes in the starting points of the patterns and populates a dictionary of sets with all the indices of each pattern
"""
res = {}
for i in ptrStartIndices:
s = set()
for j in range(i, i+l):
s.add(j)
res[i] = s
return res
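# Illustrative example (not from the original source):
#   populateDictPtrIndices([2, 7], 3) returns {2: set([2, 3, 4]), 7: set([7, 8, 9])}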
def getTranscribedPatterns(ptr = ['TA', 'TA', 'KI', 'TA']):
"""
Gets the transcribed sequences for the pattern (ptr) provided, based on the onset information in the transcribed data.
"""
result = []
l = len(ptr)
# Getting the masterdata
config = ut.loadConfig('/home/swapnil/SMC/MasterThesis/gitPercPatterns/code/rlcs/config')
transFolder = config['transFolder']
#transFolder = '/home/swapnil/Desktop/temp'
lblDir = config['lblDir']
onsDir = config['onsDir']
masterData = ut.getAllSylbData(tPath = transFolder, lblDir = lblDir, onsDir = onsDir)
# Find the start and the end points of the patterns in each composition
for comp in masterData:
compName = comp[2]
print 'Working for composition:' + compName
transScore = comp[0][0]
transOnset = comp[0][1]
origScore = comp[1][0]
origOnset = comp[1][1]
# Get the starting indices for the pattern in the composition comp
ptrStartIndices = ut.getSubstringPos(origScore, ptr)
# Get the dictionaries of set for the indices of patterns in the ground truth
dictPtrIndices = populateDictPtrIndices(ptrStartIndices, l)
# Get the closest set onsets for the pattern in the transcription
ptrIndicesInTrans = getIndicesInTrans(origOnset, transOnset, dictPtrIndices)
ptrnsInTrans = getPatternsInTrans(transScore, ptrIndicesInTrans)
result.append((compName, ptrnsInTrans))
return result
def getTranscribedPatternsStats(ptrErrors):
"""
Returns the statistics of the pattern errors, in the form of the number of patterns with different lengths
"""
stats = {}
statsArray = []
total = 0
for tup in ptrErrors:
for ptr in tup[1]:
statsArray.append(len(ptr))
total += 1
if len(ptr) in stats:
stats[len(ptr)] += 1
else:
stats[len(ptr)] = 1
print 'The total number of patterns are:' + str(total)
return stats, statsArray
def getPtrStr(ptr):
"""
Inputs a pattern in the form of a list of syllables and returns the '!'-separated string for it
"""
res = ''
for syl in ptr:
res += (syl + '!')
return res.strip('!')
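# Illustrative example (not from the original source):
#   getPtrStr(['TA', 'KI', 'TA']) returns 'TA!KI!TA'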
def getPatternGroupsFromTups(transPtrs):
"""
Groups the patterns in the transcription and returns count for each class along with the total number of patterns
"""
res = {}
total = 0
for tup in transPtrs:
for ptr in tup[1]:
total += 1
strPtr = getPtrStr(ptr)
if strPtr in res:
res[strPtr] += 1
else:
res[strPtr] = 1
return res, total
def getPatternsInTransInGTPos(masterData, queryList):
'''
This function takes in the masterData and queryList and returns a pattern-wise list of patterns in the transcribed data for
each composition, at the positions where there is a pattern in the GT. This is to analyze the errors in the transcription.
'''
res = []
for query in queryList:
qLen = len(query)
qRes = []
transQ = []
for compData in masterData:
uniqMatchesGT = rlcs.getGTPatterns(compData[1][0],query)
#print 'In GT for composition ' + str(compData[2] + ':' + str(uniqMatchesGT))
# getting the onset times of the pattern boundaries
GTOnsets = compData[1][1] # array of onsets in the ground truth of the composition
transOnsets = compData[0][1] # array of the onsets in the transcribed data of the composition
for match in uniqMatchesGT:
#print 'Working for:' + str(match)
GTStartIndex = match[0] - 1 # start index of the pattern in the GT
GTEndIndex = GTStartIndex + qLen - 1 # end index of the pattern in the GT
#print 'Starting index in GT:' + str(GTStartIndex)
#print 'Ending index in GT:' + str(GTEndIndex)
transStartIndex = ut.findClosestIndex(GTOnsets[GTStartIndex], transOnsets, 0, len(transOnsets)-1)
transEndIndex = ut.findClosestIndex(GTOnsets[GTEndIndex], transOnsets, 0, len(transOnsets)-1)
#print 'Starting index in Trans:' + str(transStartIndex)
#print 'Ending index in iTrans:' + str(transEndIndex)
if compData[0][0][transStartIndex] == 'NA' and compData[0][0][transStartIndex+1] == 'KI' and compData[0][0][transStartIndex+2] == 'TA' and compData[0][0][transStartIndex+3] == 'TA' and transEndIndex-transStartIndex+1 == 4:
print compData[2]
qRes.append(transEndIndex - transStartIndex + 1)
transQ.append(compData[0][0][transStartIndex:transEndIndex + 1])
res.append((query, qRes, transQ))
return res
def getPatternGroups(ptrs):
'''
Takes in a group (list) of patterns and returns them after grouping them, with a count of each
'''
res = {}
for ptr in ptrs:
strPtr = getPtrStr(ptr)
if strPtr in res:
res[strPtr] += 1
else:
res[strPtr] = 1
# adding the length of the pattern to the payload
for key, val in res.iteritems():
res[key] = (val, len(key.split('!')))
return res
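# Illustrative example (not from the original source):
#   getPatternGroups([['TA', 'KI'], ['TA', 'KI'], ['NA']])
#   returns {'TA!KI': (2, 2), 'NA': (1, 1)}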
def plotFramesPerSyl(bagOfFeatsFile='/home/swapnil/SMC/MasterThesis/gitPercPatterns/code/sylbSimilarity/bagOfFeats.pkl'):
'''
Takes in the file path for the bag of feats file and plots a bar graph for distribution of the frames for all the syllables
'''
fo = open(bagOfFeatsFile, 'rb')
feats = pkl.load(fo)
n = len(feats)
x = np.arange(n)
syls = []
frms = []
for key, val in feats.iteritems():
syls.append(key)
frms.append(len(feats[key]))
fig, ax = plt.subplots()
rects = ax.bar(x, frms, 0.35, color='b')
ax.set_xticks(x + 0.35/2)
ax.set_xticklabels(syls, fontsize='large')
ax.set_title('Frames for each bol', fontsize='large')
ax.set_ylabel('Number of Frames', fontsize='large')
# adding the count on top of the bar
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.02 * height, ' %d'%int(height), ha='center', va='bottom', fontsize='large')
plt.show()
| agpl-3.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/tests/test_basic.py | 5 | 1550 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from nose.tools import assert_equal
from matplotlib.cbook import MatplotlibDeprecationWarning
from matplotlib.testing.decorators import knownfailureif
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'The finance module has been deprecated in mpl 2',
MatplotlibDeprecationWarning)
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
YihaoLu/statsmodels | statsmodels/datasets/heart/data.py | 25 | 1858 | """Heart Transplant Data, Miller 1976"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """???"""
TITLE = """Transplant Survival Data"""
SOURCE = """ Miller, R. (1976). Least squares regression with censored dara. Biometrica, 63 (3). 449-464.
"""
DESCRSHORT = """Survival times after receiving a heart transplant"""
DESCRLONG = """This data contains the survival time after receiving a heart transplant, the age of the patient and whether or not the survival time was censored.
"""
NOTE = """::
Number of Observations - 69
Number of Variables - 3
Variable name definitions::
death - Days after surgery until death
age - age at the time of surgery
censored - indicates if an observation is censored. 1 is uncensored
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
dset = du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
dset.censors = dset.exog[:,0]
dset.exog = dset.exog[:,1]
return dset
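# A hedged usage sketch (follows the Dataset conventions set up above; after
# load(), endog holds the survival times, exog the ages at surgery, and
# censors the censoring indicators):
#   dset = load()
#   survival_days, age, censored = dset.endog, dset.exog, dset.censors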
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/heart.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
| bsd-3-clause |
bundgus/python-playground | matplotlib-playground/examples/images_contours_and_fields/contourf_log.py | 1 | 1347 | '''
Demonstrate use of a log color scale in contourf
'''
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
N = 100
x = np.linspace(-3.0, 3.0, N)
y = np.linspace(-2.0, 2.0, N)
X, Y = np.meshgrid(x, y)
# A low hump with a spike coming out of the top right.
# Needs to have z/colour axis on a log scale so we see both hump and spike.
# linear scale only shows the spike.
z = (bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0)
+ 0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0))
# Put in some negative values (lower left corner) to cause trouble with logs:
z[:5, :5] = -1
# The following is not strictly essential, but it will eliminate
# a warning. Comment it out to see the warning.
z = ma.masked_where(z <= 0, z)
# Automatic selection of levels works; setting the
# log locator tells contourf to use a log scale:
cs = plt.contourf(X, Y, z, locator=ticker.LogLocator(), cmap=cm.PuBu_r)
# Alternatively, you can manually set the levels
# and the norm:
#lev_exp = np.arange(np.floor(np.log10(z.min())-1),
# np.ceil(np.log10(z.max())+1))
#levs = np.power(10, lev_exp)
#cs = plt.contourf(X, Y, z, levs, norm=colors.LogNorm())
# The 'extend' kwarg does not work yet with a log scale.
cbar = plt.colorbar()
plt.show()
| mit |
lhm30/PIDGIN | predict_single.py | 1 | 3159 | #Author : Lewis Mervin [email protected]
#Supervisor : Dr. A. Bender
#All rights reserved 2014
#Protein Target Prediction Tool trained on SARs from PubChem (Mined 08/04/14) and ChEMBL18
#Molecular Descriptors : 2048bit Morgan Binary Fingerprints (Rdkit) - ECFP4
#Dependencies : rdkit, sklearn, numpy
#libraries
from rdkit import Chem
from rdkit.Chem import AllChem
from sklearn.naive_bayes import BernoulliNB
import cPickle
import glob
import os
import sys
import numpy as np
def introMessage():
print '=============================================================================================='
print ' Author: Lewis Mervin\n Email: [email protected]\n Supervisor: Dr. A. Bender'
print ' Address: Centre For Molecular Informatics, Dept. Chemistry, Lensfield Road, Cambridge CB2 1EW'
print '==============================================================================================\n'
return
#import user query
def importQuery():
query = open(file_name).read().splitlines()
matrix = []
for q in query:
matrix.append(calcFingerprints(q))
matrix = np.array(matrix, dtype=np.uint8)
return matrix
#calculate 2048bit morgan fingerprints, radius 2
def calcFingerprints(smiles):
m1 = Chem.MolFromSmiles(smiles)
fp = AllChem.GetMorganFingerprintAsBitVect(m1,2, nBits=2048)
binary = fp.ToBitString()
return list(binary)
#get names of uniprots
def getName():
global u_name
t_file = open('classes_in_model.txt').read().splitlines()
t_file.pop(0)
for t in t_file:
t = t.split('\t')
u_name[t[1]] = t[0]
return
#import thresholds as specified by user
def importThresholds(uniprot):
t_file = open('thresholds.txt').read().splitlines()
for t in t_file:
t = t.split('\t')
if t[0] == uniprot:
thresholds = (float(t) for t in t[1:])
return thresholds
#main
introMessage()
file_name = sys.argv[1]
t_count = len(glob.glob('models/*.pkl'))
print ' Total Number of Classes : ' + str(t_count)
output_name = 'out_result_single.txt'
file = open(output_name, 'w')
querymatrix = importQuery()
u_name = dict()
getName()
print ' Query Molecule : ' + file_name
file.write('NAME\tUNIPROT\tRAW_SCORE\tPRECISION\tF_SCORE\tRECALL\tACCURACY\t0.5\n')
count=0
#for each model
for filename in glob.glob('models/*.pkl'):
row = []
count +=1
#unpickle model
with open(filename, 'rb') as fid:
row = [u_name[filename[7:-4]],filename[7:-4]]
bnb = cPickle.load(fid)
prob = bnb.predict_proba(querymatrix)[0][1]
row.append(prob)
#if the probability of activity is above threshold then active
thresholds = importThresholds(filename[7:-4])
for thresh in thresholds:
if prob >= thresh:
row.append('1')
else:
row.append('0')
file.write('\t'.join(map(str,row)) + '\n')
#update percent finished
percent = (float(count)/float(t_count))*100
sys.stdout.write(' Performing Classification on Query Molecule: %3d%%\r' % percent)
sys.stdout.flush()
print '\n Wrote Results to: ' + output_name
file.close() | mit |
marohngroup/kpfm | kpfm/lockin/__init__.py | 1 | 20559 | # -*- coding: utf-8 -*-
"""
======
lockin
======
This module contains classes and functions for performing digital lock-in
amplifier data analysis.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from scipy import signal, optimize
import matplotlib as mpl
import matplotlib.pyplot as plt
import h5py
import pandas as pd
import sigutils
from scipy.signal.signaltools import _centered
from kpfm.util import next_fast_len
class LockIn(object):
"""A basic digital lock-in amplifier.
Run an input signal x through a digital lock-in amplifier.
A finite impulse response (FIR) lock-in filter can be provided by
`lock` or `lock2`, or a custom FIR filter can be used by directly
calling `run`. After generating the complex lock-in output, the lock-in
can be phased by running `phase`, or `autophase`.
After phasing, the lock-in output channels are X, the in-phase channel and
Y, the out-of-phase channel.
Parameters
----------
t: array_like
Time array
x: array_like
Input signal array
fs: float
Sampling rate
Example
-------
>>> fs = 1000.0
>>> t = np.arange(1000)/fs
>>> A = 1 - 0.1 * t
>>> f = 80 + 0.1 * t
>>> x = A * np.sin(np.cumsum(f)*2*np.pi/fs)
>>> li = LockIn(t, x, fs)
We process the data with a 20 Hz bandwidth lock-in amplifier filter.
>>> li.lock(bw=20.0)
Response:
f mag dB
0.000 1.000 0.000
10.000 0.996 -0.035
20.000 0.500 -6.022
40.025 0.000 -91.020
80.051 0.000 -113.516
500.000 0.000 -204.987
The lock-in amplifier automatically infers the reference frequency.
The printed response shows the lock-in amplifier gain at different
frequencies. For the output to be valid the gain at the reference frequency
must be very small (-60 dB or smaller).
We phase the lock-in amplifier output, and then have the lock-in variables
available for use.
>>> li.phase()
>>> li('t') # Shortcut for accessing masked version of the signal.
"""
def __init__(self, t, x, fs=None):
self.t = t
self.x = x
if fs is not None:
self.fs = fs
else:
self.fs = 1/np.mean(np.gradient(t))
self.f0_est = freq_from_fft(self.x, self.fs)
@classmethod
def from_x(Cls, x, fs, t0=0):
"""Generate the time array internally."""
t = t0 + np.arange(x.size) / fs
return Cls(t, x, fs)
def __call__(self, key):
"""Shorthand for validly masked section of any data array."""
return getattr(self, key)[self.m]
def __repr__(self):
f0 = getattr(self, 'f0', self.f0_est)
return "LockIn(f0={})".format(f0)
def run(self, f0=None, fir=None):
"""Run the lock-in amplifier at reference frequency ``f0``,
using the finite impulse response filter ``fir``.
"""
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if fir is not None:
self.fir = fir
self.z = z = signal.fftconvolve(self.x * np.exp(-2j*np.pi*f0*self.t),
2*self.fir,
"same")
n_fir = self.fir.size
indices = np.arange(self.t.size)
# Valid region mask
# This is borrowed explicitly from scipy.signal.sigtools.fftconvolve
self.m = m = np.zeros_like(self.t, dtype=bool)
mask_indices = _centered(indices, self.t.size - n_fir + 1)
if n_fir % 2 == 0:
mask_indices += 1
self.m[mask_indices] = True
self.A = abs(self.z)
self.phi = np.angle(self.z)
def lock(self, bw=None, f0=None, bw_ratio=0.5, coeff_ratio=9., coeffs=None,
window='blackman'):
"""Standard, windowed finite impulse response filter."""
t = self.t
fs = self.fs
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if bw is None:
if bw_ratio > 1:
raise ValueError("Bandwidth ratio 'bw_ratio' must be < 1 (bw_ratio={}".format(bw_ratio))
bw = bw_ratio * f0 / (self.fs/2)
else:
bw = bw / (self.fs/2)
if coeffs is None:
coeffs = round(coeff_ratio / bw, 0)
if coeffs > self.x.size:
raise ValueError(
"""No valid output when 'coeffs' > t.size (coeffs: {}, t.size: {}).
Reduce coeffs by increasing bw, bw_ratio, or decreasing coeff_ratio,
or provide more data.""".format(coeffs, t.size))
self.fir = b = signal.firwin(coeffs, bw, window=window)
w, rep = signal.freqz(b, worN=np.pi*np.array([0., bw/2, bw, f0/self.fs, f0/(self.fs/2.), 1.]))
print("Response:")
_print_magnitude_data(w, rep, fs)
self.run(f0=f0)
def lock2(self, f0=None, fp_ratio=0.1, fc_ratio=0.4, coeff_ratio=8,
fp=None, fc=None, coeffs=None, window='blackman',
print_response=True):
t = self.t
fs = self.fs
if f0 is None:
self.f0 = f0 = self.f0_est
else:
self.f0 = f0
if fp is None:
fp = fp_ratio * f0
if fc is None:
fc = fc_ratio * f0
self.fir = b = lock2(f0, fp, fc, fs, coeff_ratio, coeffs, window,
print_response=print_response)
if coeffs > self.x.size:
raise ValueError(
"""No valid output when 'coeffs' > t.size (coeffs: {}, t.size: {}).
Reduce coeffs by increasing bw, bw_ratio, decreasing coeff_ratio,
or provide more data.""".format(coeffs, t.size))
self.run(f0=f0)
def lock_butter(self, N, f3dB, t_exclude=0, f0=None, print_response=True):
"""Butterworth filter the lock-in amplifier output"""
t = self.t
fs = self.fs
nyq = fs / 2.
f3dB = f3dB / nyq
self.iir = ba = signal.iirfilter(N, f3dB, btype='low')
if f0 is None:
self.f0 = f0 = self.f0_est
self.z = z = signal.lfilter(self.iir[0], self.iir[1], self.z)
# TODO: Fix accounting on final / initial point
m = self.m
self.m = self.m & (t >= (t[m][0] + t_exclude)) & (t < (t[m][-1] - t_exclude))
self.A = abs(self.z)
self.phi = np.angle(self.z)
if print_response:
w, rep = signal.freqz(self.iir[0], self.iir[1],
worN=np.pi*np.array([0., f3dB/2, f3dB,
0.5*f0/nyq, f0/nyq, 1.]))
print("Response:")
_print_magnitude_data(w, rep, fs)
def _output_df_X_Y(self):
"""Helper function for outputting frequency shift
and lock-in X, Y channels after phasing."""
self.df = np.gradient(self.dphi) * self.fs / (2*np.pi)
self.Z = np.exp(-1j*self.phi_fit) * self.z
self.X = self.Z.real
self.Y = self.Z.imag
def manual_phase(self, phi0, f0corr=None):
"Manually phase the lock-in output with phase phi0 (in radians)."
self.phi0 = phi0
if f0corr is not None:
self.f0corr = f0corr
delta_f0 = f0corr - self.f0
else:
self.f0corr = self.f0
delta_f0 = 0.0
self.phi_fit = self.t * delta_f0 * 2 * np.pi + self.phi0
self.dphi = np.unwrap(((self.phi - self.phi_fit + np.pi) % (2*np.pi))
- np.pi)
self._output_df_X_Y()
def autophase(self, ti=None, tf=None, unwrap=False, x0=[0., 0.], adjust_f0=True):
t = self.t
m = self.m
z = self.z
if unwrap:
phi = np.unwrap(self.phi)
else:
phi = self.phi
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
self.mb = mb = auto_phase(t[mask], phi[mask], x0, adjust_f0=adjust_f0)
self.phi0 = mb[-1]
self.phi_fit = np.polyval(mb, t)
self.dphi = np.unwrap((
(self.phi - self.phi_fit + np.pi) % (2*np.pi)) - np.pi)
if adjust_f0:
self.f0corr = self.f0 + mb[0] / (2*np.pi)
else:
self.f0corr = self.f0
self._output_df_X_Y()
def phase(self, ti=None, tf=None, weight=True, adjust_f0=True):
t = self.t
m = self.m
z = self.z
poly_order = int(adjust_f0)
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
phi = np.unwrap(self.phi[mask])
std = np.std(self.phi[mask])
phi_norm = phi / std
try:
if weight:
A = abs(z[mask]) / np.std(abs(z[mask]))
self.mb = mb = np.polyfit(t[mask], phi_norm, poly_order, w=A) * std
else:
self.mb = mb = np.polyfit(t[mask], phi_norm, poly_order) * std
except TypeError:
print(t)
print(ti)
print(tf)
raise
self.phi_fit = np.polyval(mb, t)
self.dphi = np.unwrap(((self.phi - self.phi_fit + np.pi) % (2*np.pi))
- np.pi)
self.phi0 = mb[-1]
if adjust_f0:
self.f0corr = self.f0 + mb[0] / (2*np.pi)
else:
self.f0corr = self.f0
self._output_df_X_Y()
def decimate(self, factor=None):
if factor is None:
factor = int(self.fs//self.f0)
self.dec_t = self.t[self.m][::factor]
self.dec_phi = self.dphi[self.m][::factor]
self.dec_A = self.A[self.m][::factor]
self.dec_df = self.df[self.m][::factor]
self.dec_f0 = self.f0
self.dec_fs = self.fs/factor
self.dec_z = self.z[self.m][::factor]
def phase_dec(self, ti=None, tf=None, weight=True):
t = self.dec_t
m = np.ones_like(self.dec_z, dtype=bool)
z = self.dec_z
if ti is None and tf is None:
mask = m
elif ti is not None and tf is None:
mask = m & (t >= ti)
elif ti is None and tf is not None:
mask = m & (t < tf)
else:
mask = m & (t >= ti) & (t < tf)
phi = np.unwrap(np.angle(z))
std = np.std(phi[mask])
phi_norm = phi / std
try:
if weight:
A = abs(z[mask]) / np.std(abs(z[mask]))
self.mb = mb = np.polyfit(t[mask], phi_norm[mask], 1, w=A) * std
else:
self.mb = mb = np.polyfit(t[mask], phi_norm[mask], 1) * std
except TypeError:
print(t)
print(ti)
print(tf)
raise
phi_fit = np.polyval(mb, t)
dphi = np.unwrap(((phi - phi_fit + np.pi) % (2*np.pi)) - np.pi)
df = np.gradient(dphi) * self.dec_fs / (2*np.pi)
self.f0_dec_direct = self.f0 + mb[0] / (2*np.pi)
def absolute_phase(self, mask, guess=0.0):
"""Perform a curve fit """
phi = self.phi[mask] + self.t[mask]*2*np.pi*self.f0corr
popt, pcov = optimize.curve_fit(lambda phi, phi0:
self.A[mask]*np.cos(phi+phi0), phi, self.x[mask],
[guess])
self.phi0abs = popt[0]
self.phiabs = self.phi + self.t*2*np.pi*self.f0corr + self.phi0abs
return popt, pcov
class FIRStateLock(object):
"""
Lock-in amplifier object which uses an FIR filter, decimates data, and
processes data in batches.
Pass data in with the ``filt`` function.
Lock-in amplifier output stored in ``z_out``.
Time array accessible with the ``get_t`` function.
Parameters
----------
fir: array_like
finite-impulse-response (FIR) filter coefficients
dec: int
Decimation factor (output sampling rate = input sampling rate /dec)
f0: scalar
Lock-in amplifier reference frequency.
phi0: scalar
Initial lock-in amplifier phase.
t0: scalar, optional
Initial time associated with the first incoming data point.
Defaults to 0.
fs: scalar, optional
Input sampling rate. Defaults to 1.
"""
def __init__(self, fir, dec, f0, phi0, t0=0, fs=1.):
self.fir = fir
self.nfir_mid = (len(fir) - 1)//2
self.dec = dec
self.f0 = f0
self.w0 = f0/fs
self.phi0 = self.phi_i = phi0 + 2*np.pi*self.w0
self.t0 = t0
self.fs = fs
self.t0_dec = t0 + self.nfir_mid / self.fs
self.z = np.array([], dtype=np.complex128)
self.z_out = np.array([], dtype=np.complex128)
def filt(self, data):
n = self.fir.size
phi = (-2*np.pi*self.w0*np.arange(1, data.size+1) + self.phi_i
) % (2*np.pi)
self.phi_i = phi[-1]
z = np.r_[self.z, data * np.exp(1j*phi)]
y = signal.fftconvolve(z, 2*self.fir, mode="full")
indices = np.arange(y.size)
m = indices[n-1:-n+1]
if len(m) == 0:
self.z = z
else:
m_dec = m[::self.dec]
self.z_out = np.r_[self.z_out, y[m_dec]]
self.z = z[m_dec[-1] - (n-1) + self.dec:]
def get_t(self):
return self.t0_dec + np.arange(self.z_out.size)/self.fs * self.dec
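# A hedged usage sketch (parameter values and the data_chunks iterable are
# illustrative, not taken from the source; lock2 is defined later in this
# module):
#   fir = lock2(f0=62e3, fp=2e3, fc=8e3, fs=1e6, print_response=False)
#   lia = FIRStateLock(fir, dec=16, f0=62e3, phi0=0.0, fs=1e6)
#   for chunk in data_chunks:
#       lia.filt(chunk)
#   t_out, z_out = lia.get_t(), lia.z_out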
class FIRStateLockVarF(object):
"""
Variable frequency lock-in amplifier object which uses an FIR filter,
decimates data, and processes data in batches.
Pass data in with the ``filt`` function.
Lock-in amplifier output stored in ``z_out``.
Time array corresponding to the data in ``z_out`` accessible
with the ``get_t`` function.
Parameters
----------
fir: array_like
finite-impulse-response (FIR) filter coefficients
dec: int
Decimation factor (output sampling rate = input sampling rate /dec)
f0: function
Lock-in amplifier reference frequency as a function of time
phi0: scalar
Initial lock-in amplifier phase.
t0: scalar, optional
Initial time associated with the first incoming data point.
Defaults to 0.
fs: scalar, optional
Input sampling rate. Defaults to 1.
"""
def __init__(self, fir, dec, f0, phi0, t0=0, fs=1.):
self.fir = fir
self.nfir_mid = (len(fir) -1)//2
self.dec = dec
self.f0 = f0
self.w0 = lambda t: f0(t) / fs
self.phi0 = self.phi_i = phi0 + 2*np.pi*self.w0(t0)
self.t0 = t0
self._current_t = t0 # This field updates as incoming data arrives
self.fs = fs
self.t0_dec = t0 + self.nfir_mid / self.fs
# Stores filtered, lock-in data waiting to be decimated
self.z = np.array([], dtype=np.complex128)
# Decimated output
self.z_out = np.array([], dtype=np.complex128)
def filt(self, data):
n = self.fir.size
m = data.size
t = self._current_t + np.arange(m, dtype=np.float64) / self.fs
w = self.w0(t)
phi = (-2*np.pi*np.cumsum(w) + self.phi_i) % (2*np.pi)
self.phi_i = phi[-1]
self._current_t = t[-1]
z = np.r_[self.z, data * np.exp(1j*phi)]
y = signal.fftconvolve(z, 2*self.fir, mode="full")
indices = np.arange(y.size)
m = indices[n-1:-n+1]
if len(m) == 0:
self.z = z
else:
m_dec = m[::self.dec]
self.z_out = np.r_[self.z_out, y[m_dec]]
self.z = z[m_dec[-1] - (n-1) + self.dec:]
def get_t(self):
return self.t0_dec + np.arange(self.z_out.size)/self.fs * self.dec
def phase_err(t, phase, dphi_max, x):
return abs(abs(phase - (x[0]*t + x[1])) - dphi_max) - dphi_max
def _fit_phase(t, phase, amp, phase_reversals=True):
if phase_reversals:
dphi_max = np.pi/2
else:
dphi_max = np.pi
f = lambda x: np.sum(amp**2 * abs((abs(abs(phase - (x[0]*t + x[1])) - dphi_max) - dphi_max))**2)
return f
def _fit_phase_only(t, phase, amp, phase_reversals=True):
if phase_reversals:
dphi_max = np.pi/2
else:
dphi_max = np.pi
f = lambda x: np.sum(amp**2*abs((abs(abs(phase - (x[0])) - dphi_max) - dphi_max))**2)
return f
def auto_phase(t, z, x0=np.array([0., 0.]), phase_reversals=True, adjust_f0=True):
""""""
phase = np.angle(z)
amp = abs(z) / np.std(z)
if adjust_f0:
mb = optimize.fmin_slsqp(_fit_phase(t, phase, amp, phase_reversals), x0,)
else:
mb = optimize.fmin_slsqp(_fit_phase_only(t, phase, amp, phase_reversals), x0[-1:],)
mb[-1] = mb[-1] - np.pi/2
return mb
def freq_from_fft(sig, fs):
"""Estimate frequency from peak of FFT
"""
# Compute Fourier transform of windowed signal
N = next_fast_len(sig.size)
windowed = sig * signal.blackmanharris(len(sig))
f = np.fft.rfft(windowed, N)
# Find the peak and interpolate to get a more accurate peak
i = np.argmax(abs(f)) # Just use this for less-accurate, naive version
true_i = parabolic(np.log(abs(f)), i)[0]
# Convert to equivalent frequency
return fs * true_i / N
def parabolic(f, x):
"""Quadratic interpolation for estimating the true position of an
inter-sample maximum when nearby samples are known.
f is a vector and x is an index for that vector.
Returns (vx, vy), the coordinates of the vertex of a parabola that goes
through point x and its two neighbors.
Example:
Defining a vector f with a local maximum at index 3 (= 6), find local
maximum if points 2, 3, and 4 actually defined a parabola.
In [3]: f = [2, 3, 1, 6, 4, 2, 3, 1]
In [4]: parabolic(f, argmax(f))
Out[4]: (3.2142857142857144, 6.1607142857142856)
"""
xv = 1/2. * (f[x-1] - f[x+1]) / (f[x-1] - 2 * f[x] + f[x+1]) + x
yv = f[x] - 1/4. * (f[x-1] - f[x+1]) * (xv - x)
return (xv, yv)
def _print_magnitude_data(w, rep, fs):
df = pd.DataFrame()
df['f'] = w / (2*np.pi) * fs
df['mag'] = abs(rep)
df['dB'] = 20 * np.log10(df['mag'].values)
df.sort_values(by="f", inplace=True)
print(df.to_string(index=False, float_format="{:.3f}".format))
return df
def fir_weighted_lsq(weight_func, N):
"""Return intercept, slope filter coefficients for a linear least squares
fit with weight function ``weight_func``, using ``N`` most recent points."""
i = np.arange(N)
w = weight_func(i)
s0 = np.sum(w)
s1 = np.sum(i*w)
s2 = np.sum(i**2 * w)
prefactor = 1./(s0*s2 - s1**2)
return prefactor*w*(s2 - s1*i), prefactor*w*(s0*i - s1)
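# A hedged usage sketch (the exponential weight function and window length are
# illustrative, and the orientation of the coefficient arrays with respect to
# the most recent sample is an assumption of this sketch):
#   b0, b1 = fir_weighted_lsq(lambda i: np.exp(-i / 16.), N=64)
#   # np.dot(b0, x_window) estimates the intercept and np.dot(b1, x_window)
#   # the slope of a weighted linear fit over a 64-sample window x_window.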
# x data
# (guess f0)
# filter (b, a)
# phasing
# Don't actually need time data
def lock2(f0, fp, fc, fs, coeff_ratio=8.0, coeffs=None,
window='blackman', print_response=True):
"""Create a gentle fir filter. Pass frequencies below fp, cutoff frequencies
above fc, and gradually taper to 0 in between.
These filters have a smoother time domain response than filters created
with lock."""
# Convert to digital frequencies, normalizing f_nyq to 1,
# as requested by scipy.signal.firwin2
nyq = fs / 2
fp = fp / nyq
fc = fc / nyq
if coeffs is None:
coeffs = int(round(coeff_ratio / fc, 0))
# Force number of tukey coefficients odd
alpha = (1-fp*1.0/fc)
n = int(round(1000. / alpha) // 2)
N = n * 2 + 1
f = np.linspace(0, fc, n+1)
fm = np.zeros(n + 2)
mm = np.zeros(n + 2)
fm[:-1] = f
# Append fm = nyquist frequency by hand; needed by firwin2
fm[-1] = 1.
m = signal.tukey(N, alpha=alpha)
# Only take the falling part of the tukey window,
# not the part equal to zero
mm[:-1] = m[n:]
# Use approx. 8x more frequencies than total coefficients we need
nfreqs = 2**(int(round(np.log2(coeffs)))+3)+1
b = signal.firwin2(coeffs, fm, mm,
nfreqs=nfreqs,
window=window)
# Force filter gain to 1 at DC; corrects for small rounding errors
b = b / np.sum(b)
w, rep = signal.freqz(b, worN=np.pi*np.array([0., fp/2, fp, fc, 2*fc,
0.5*f0/nyq, f0/nyq, 1.]))
if print_response:
print("Response:")
_print_magnitude_data(w, rep, fs)
return b | mit |
jostep/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 21 | 53471 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = variables.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
variables.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the implementation to copy the array
    # if any hooks were added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={'MSE': _streaming_mean_squared_error_histogram})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(
['bogus_lookup', 'feature'],
[compat.as_str_any(x) for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
dkoslicki/CMash | setup.py | 1 | 1867 | import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
SCRIPTS = []
SCRIPTS.extend([os.path.join("scripts", script)
for script in os.listdir(os.path.join(os.path.dirname(__file__), "scripts"))
if script.endswith(".py")])
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'README.md'), 'r') as fid:
LONG_DESCRIPTION = fid.read()
setup(
name="CMash",
version="0.5.1",
author="David Koslicki",
author_email="[email protected]",
description=("Fast and accurate set similarity estimation via containment min hash (for genomic datasets)."),
long_description=LONG_DESCRIPTION,
#license="BSD-3-Clause", # see classifiers
keywords="jaccard min hash containment genomics metagenomics",
url="https://github.com/dkoslicki/CMash",
packages=['CMash'],
install_requires=[
'khmer>=2.1.1',
'screed',
'h5py',
'numpy',
'blist',
'argparse',
'pandas>=0.21.1',
'setuptools>=24.2.0',
'six',
'scipy',
'matplotlib',
'marisa-trie',
'hydra',
'pycairo'
],
zip_safe=False,
package_data={'CMash': ['data/*.fna', 'tests/Organisms/*.fna.gz']},
scripts=SCRIPTS,
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
],
)
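# Typical installation (illustrative): from the repository root,
#   pip install .
# installs the CMash package along with the command-line scripts collected in SCRIPTS.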
| bsd-3-clause |
procoder317/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
abele/bokeh | bokeh/util/serialization.py | 31 | 7419 | """ Functions for helping with serialization and deserialization of
Bokeh objects.
"""
from __future__ import absolute_import
from six import iterkeys
is_numpy = None
try:
import numpy as np
is_numpy = True
except ImportError:
is_numpy = False
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
import logging
log = logging.getLogger(__name__)
_simple_id = 1000
def make_id():
""" Return a new unique ID for a Bokeh object.
Normally this function will return UUIDs to use for identifying Bokeh
    objects. This is especially important for Bokeh objects stored on a
Bokeh server. However, it is convenient to have more human-readable
IDs during development, so this behavior can be overridden by
setting the environment variable ``BOKEH_SIMPLE_IDS=yes``.
"""
global _simple_id
import uuid
from ..settings import settings
if settings.simple_ids(False):
_simple_id += 1
new_id = _simple_id
else:
new_id = uuid.uuid4()
return str(new_id)
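# Illustrative behaviour (IDs shown are hypothetical):
#   make_id()  -> 'c8a2e0f2-...' with default settings (a stringified UUID4)
#   make_id()  -> '1001', '1002', ... with BOKEH_SIMPLE_IDS=yes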
def urljoin(*args):
""" Construct an absolute URL from several URL components.
Args:
*args (str) : URL components to join
Returns:
str : joined URL
"""
from six.moves.urllib.parse import urljoin as sys_urljoin
from functools import reduce
return reduce(sys_urljoin, args)
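# Illustrative usage (URLs are hypothetical):
#   urljoin('http://example.com/', 'docs/', 'page.html')
#   -> 'http://example.com/docs/page.html'
# As with urllib's urljoin, a component that starts with '/' resets the path.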
def get_json(response):
""" Unify retrieving JSON responses from different sources.
Works correctly for HTTP responses from requests <=1.0, >1.0, and
the Flask test client.
Args:
response (Flask or requests response) : a response to process
Returns:
JSON
"""
import json
try:
import flask
except ImportError:
flask = None
if flask and isinstance(response, flask.Response):
# flask testing
return json.loads(response.data.decode('utf-8'))
else:
# requests
if hasattr(response.json, '__call__'):
return response.json()
else:
return response.json
def dump(objs, docid, changed_only=True):
""" Serialize a sequence of Bokeh objects into JSON
Args:
objs (seq[obj]) : a sequence of Bokeh object to dump
docid (str) : an ID for a Bokeh Document to dump relative to
changed_only (bool, optional) : whether to dump only attributes
that have had their values changed at some point (default: True)
Returns:
list[json]
"""
json_objs = []
for obj in objs:
ref = obj.ref
ref["attributes"] = obj.vm_serialize(changed_only=changed_only)
ref["attributes"].update({"id": ref["id"], "doc" : docid})
json_objs.append(ref)
return json_objs
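# Illustrative usage (a sketch; the objects and values are hypothetical):
#   refs = dump([plot, data_source], docid='doc-123')
# Each element of `refs` is a reference dict of the form
#   {'type': 'Plot', 'id': '...', 'attributes': {..., 'id': '...', 'doc': 'doc-123'}}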
def is_ref(frag):
""" Test whether a given Bokeh object graph fragment is a reference.
A Bokeh "reference" is a ``dict`` with ``"type"`` and ``"id"`` keys.
Args:
frag (dict) : a fragment of a Bokeh object graph
Returns:
True, if the fragment is a reference, otherwise False
"""
return isinstance(frag, dict) and \
frag.get('type') and \
frag.get('id')
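# Illustrative behaviour:
#   is_ref({'type': 'Circle', 'id': 'abc123'})  # truthy: both keys present
#   is_ref({'type': 'Circle'})                  # falsy: no 'id' key
#   is_ref(['not', 'a', 'dict'])                # falsy: not a dict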
def json_apply(fragment, check_func, func):
""" Apply a function to JSON fragments that match the given predicate
and return the collected results.
Recursively traverses a nested collection of ``dict`` and ``list``,
    applying ``check_func`` to each fragment. If it returns True, then
    ``func(fragment)`` is collected in the final output.
Args:
fragment (JSON-like) : the fragment to apply ``func`` to recursively
check_func (callable) : the predicate to test fragments with
func (callable) : the conversion function to apply
Returns:
converted fragments
"""
if check_func(fragment):
return func(fragment)
elif isinstance(fragment, list):
output = []
for val in fragment:
output.append(json_apply(val, check_func, func))
return output
elif isinstance(fragment, dict):
output = {}
for k, val in fragment.items():
output[k] = json_apply(val, check_func, func)
return output
else:
return fragment
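# Illustrative usage: replace every reference fragment in a nested object graph
# with its id, leaving all other values untouched:
#   json_apply(fragment, is_ref, lambda ref: ref['id'])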
def transform_series(obj):
"""transforms pandas series into array of values
"""
vals = obj.values
return transform_array(vals)
def transform_array(obj):
"""Transform arrays into lists of json safe types
also handles pandas series, and replacing
nans and infs with strings
"""
# Check for astype failures (putative Numpy < 1.7)
dt2001 = np.datetime64('2001')
legacy_datetime64 = (dt2001.astype('int64') ==
dt2001.astype('datetime64[ms]').astype('int64'))
    # Not quite correct: truncates to ms.
if obj.dtype.kind == 'M':
if legacy_datetime64:
if obj.dtype == np.dtype('datetime64[ns]'):
return (obj.astype('int64') / 10**6.0).tolist()
else:
return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
elif obj.dtype.kind in ('u', 'i', 'f'):
return transform_numerical_array(obj)
return obj.tolist()
def transform_numerical_array(obj):
"""handles nans/inf conversion
"""
if isinstance(obj, np.ma.MaskedArray):
obj = obj.filled(np.nan) # Set masked values to nan
if not np.isnan(obj).any() and not np.isinf(obj).any():
return obj.tolist()
else:
transformed = obj.astype('object')
transformed[np.isnan(obj)] = 'NaN'
transformed[np.isposinf(obj)] = 'Infinity'
transformed[np.isneginf(obj)] = '-Infinity'
return transformed.tolist()
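# Illustrative behaviour (assuming numpy is available):
#   transform_numerical_array(np.array([1.0, np.nan, np.inf]))
#   -> [1.0, 'NaN', 'Infinity']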
def traverse_data(datum, is_numpy=is_numpy, use_numpy=True):
"""recursively dig until a flat list is found
if numpy is available convert the flat list to a numpy array
and send off to transform_array() to handle nan, inf, -inf
otherwise iterate through items in array converting non-json items
Args:
datum (list) : a list of values or lists
is_numpy: True if numpy is present (see imports)
use_numpy: toggle numpy as a dependency for testing purposes
"""
is_numpy = is_numpy and use_numpy
if is_numpy and not any(isinstance(el, (list, tuple)) for el in datum):
return transform_array(np.asarray(datum))
datum_copy = []
for item in datum:
if isinstance(item, (list, tuple)):
datum_copy.append(traverse_data(item))
elif isinstance(item, float):
if np.isnan(item):
item = 'NaN'
elif np.isposinf(item):
item = 'Infinity'
elif np.isneginf(item):
item = '-Infinity'
datum_copy.append(item)
else:
datum_copy.append(item)
return datum_copy
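# Illustrative behaviour (assuming numpy is available):
#   traverse_data([[1.0, np.nan], [np.inf, 2.0]])
#   -> [[1.0, 'NaN'], ['Infinity', 2.0]]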
def transform_column_source_data(data):
"""iterate through the data of a ColumnSourceData object replacing
non-JSON-compliant objects with compliant ones
"""
data_copy = {}
for key in iterkeys(data):
if is_pandas and isinstance(data[key], (pd.Series, pd.Index)):
data_copy[key] = transform_series(data[key])
elif isinstance(data[key], np.ndarray):
data_copy[key] = transform_array(data[key])
else:
data_copy[key] = traverse_data(data[key])
return data_copy
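# Illustrative usage (column names are hypothetical):
#   transform_column_source_data({'x': np.array([0.0, np.nan]), 'y': [1, 2]})
#   -> {'x': [0.0, 'NaN'], 'y': [1, 2]}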
| bsd-3-clause |
toobaz/pandas | pandas/tests/series/test_api.py | 2 | 26717 | from collections import OrderedDict
import pydoc
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Series,
TimedeltaIndex,
date_range,
period_range,
timedelta_range,
)
from pandas.core.arrays import PeriodArray
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal, ensure_clean
import pandas.io.formats.printing as printing
from .common import TestData
class SharedWithSparse:
"""
A collection of tests Series and SparseSeries can share.
In generic tests on this class, use ``self._assert_series_equal()``
which is implemented in sub-classes.
"""
def _assert_series_equal(self, left, right):
"""Dispatch to series class dependent assertion"""
raise NotImplementedError
def test_scalarop_preserve_name(self):
result = self.ts * 2
assert result.name == self.ts.name
def test_copy_name(self):
result = self.ts.copy()
assert result.name == self.ts.name
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
assert self.ts.index.name is None
assert self.ts is self.ts
cp = self.ts.copy()
cp.index.name = "foo"
printing.pprint_thing(self.ts.index.name)
assert self.ts.index.name is None
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
assert result.name == self.ts.name
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
assert result.name == self.ts.name
result = self.ts.mul(self.ts)
assert result.name == self.ts.name
result = self.ts * self.ts[:-2]
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = "something else"
result = self.ts + cp
assert result.name is None
result = self.ts.add(cp)
assert result.name is None
ops = ["add", "sub", "mul", "div", "truediv", "floordiv", "mod", "pow"]
ops = ops + ["r" + op for op in ops]
for op in ops:
# names match, preserve
s = self.ts.copy()
result = getattr(s, op)(s)
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = "changed"
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
assert result.name == self.ts.name
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
assert result.name == self.ts.name
result = self.ts[[0, 2, 4]]
assert result.name == self.ts.name
result = self.ts[5:10]
assert result.name == self.ts.name
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
assert result.name == self.ts.name
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
assert result.name == self.ts.name
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = self.series_klass(d)
expected = self.series_klass(d, index=sorted(d.keys()))
self._assert_series_equal(result, expected)
result = self.series_klass(d, index=["b", "c", "d", "a"])
expected = self.series_klass([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
self._assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = self.series_klass(data)
expected = self.series_klass(dict(data.items()))
self._assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
("col{i}".format(i=i), np.random.random()) for i in range(12)
)
series = self.series_klass(data)
expected = self.series_klass(list(data.values()), list(data.keys()))
self._assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = self.series_klass(A(data))
self._assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {("a", "a"): 0.0, ("b", "a"): 1.0, ("b", "c"): 2.0}
_d = sorted(d.items())
result = self.series_klass(d)
expected = self.series_klass(
[x[1] for x in _d], index=pd.MultiIndex.from_tuples([x[0] for x in _d])
)
self._assert_series_equal(result, expected)
d["z"] = 111.0
_d.insert(0, ("z", d["z"]))
result = self.series_klass(d)
expected = self.series_klass(
[x[1] for x in _d], index=pd.Index([x[0] for x in _d], tupleize_cols=False)
)
result = result.reindex(index=expected.index)
self._assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
        # constructing a Series from a dict as data and a TimedeltaIndex as index
        # will result in NaN in the resulting Series data
expected = self.series_klass(
data=["A", "B", "C"], index=pd.to_timedelta([0, 10, 20], unit="s")
)
result = self.series_klass(
data={
pd.to_timedelta(0, unit="s"): "A",
pd.to_timedelta(10, unit="s"): "B",
pd.to_timedelta(20, unit="s"): "C",
},
index=pd.to_timedelta([0, 10, 20], unit="s"),
)
self._assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_array_deprecated(self):
# multiple FutureWarnings, so can't assert stacklevel
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.series_klass.from_array([1, 2, 3])
def test_sparse_accessor_updates_on_inplace(self):
s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]")
s.drop([0, 1], inplace=True)
assert s.sparse.density == 1.0
class TestSeriesMisc(TestData, SharedWithSparse):
series_klass = Series
# SharedWithSparse tests use generic, series_klass-agnostic assertion
_assert_series_equal = staticmethod(tm.assert_series_equal)
def test_tab_completion(self):
# GH 9910
s = Series(list("abcd"))
# Series of str values should have .str but not .dt/.cat in __dir__
assert "str" in dir(s)
assert "dt" not in dir(s)
assert "cat" not in dir(s)
# similarly for .dt
s = Series(date_range("1/1/2015", periods=5))
assert "dt" in dir(s)
assert "str" not in dir(s)
assert "cat" not in dir(s)
        # Similarly for .cat, but with the twist that str and dt should be
        # there only if the categories are of that type; first test cat and str.
s = Series(list("abbcd"), dtype="category")
assert "cat" in dir(s)
assert "str" in dir(s) # as it is a string categorical
assert "dt" not in dir(s)
# similar to cat and str
s = Series(date_range("1/1/2015", periods=5)).astype("category")
assert "cat" in dir(s)
assert "str" not in dir(s)
assert "dt" in dir(s) # as it is a datetime categorical
def test_tab_completion_with_categorical(self):
# test the tab completion display
ok_for_cat = [
"name",
"index",
"categorical",
"categories",
"codes",
"ordered",
"set_categories",
"add_categories",
"remove_categories",
"rename_categories",
"reorder_categories",
"remove_unused_categories",
"as_ordered",
"as_unordered",
]
def get_dir(s):
results = [r for r in s.cat.__dir__() if not r.startswith("_")]
return list(sorted(set(results)))
s = Series(list("aabbcde")).astype("category")
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_cat))))
@pytest.mark.parametrize(
"index",
[
tm.makeUnicodeIndex(10),
tm.makeStringIndex(10),
tm.makeCategoricalIndex(10),
Index(["foo", "bar", "baz"] * 2),
tm.makeDateIndex(10),
tm.makePeriodIndex(10),
tm.makeTimedeltaIndex(10),
tm.makeIntIndex(10),
tm.makeUIntIndex(10),
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
Index(["a{}".format(i) for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
)
def test_index_tab_completion(self, index):
# dir contains string-like values of the Index.
s = pd.Series(index=index)
dir_s = dir(s)
for i, x in enumerate(s.index.unique(level=0)):
if i < 100:
assert not isinstance(x, str) or not x.isidentifier() or x in dir_s
else:
assert x not in dir_s
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
msg = "'Series' objects are mutable, thus they cannot be hashed"
with pytest.raises(TypeError, match=msg):
hash(s_empty)
with pytest.raises(TypeError, match=msg):
hash(s)
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_iter(self):
for i, val in enumerate(self.series):
assert val == self.series[i]
for i, val in enumerate(self.ts):
assert val == self.ts[i]
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
assert getkeys() is self.ts.index
def test_values(self):
tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
for idx, val in self.series.iteritems():
assert val == self.series[idx]
for idx, val in self.ts.iteritems():
assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.iteritems(), "reverse")
def test_items(self):
for idx, val in self.series.items():
assert val == self.series[idx]
for idx, val in self.ts.items():
assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.items(), "reverse")
def test_raise_on_info(self):
s = Series(np.random.randn(10))
msg = "'Series' object has no attribute 'info'"
with pytest.raises(AttributeError, match=msg):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype="float64")
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
def test_copy_tzaware(self):
# GH#11794
# copy of tz-aware
expected = Series([Timestamp("2012/01/01", tz="UTC")])
expected2 = Series([Timestamp("1999/01/01", tz="UTC")])
for deep in [None, False, True]:
s = Series([Timestamp("2012/01/01", tz="UTC")])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp("1999/01/01", tz="UTC")
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected)
else:
# we DID modify the original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis="rows"), s.dropna(axis="index"))
assert s.dropna().sum("rows") == 3
assert s._get_axis_number("rows") == 0
assert s._get_axis_name("rows") == "index"
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
# no exception and no empty docstring
assert pydoc.getdoc(Series.index)
def test_numpy_unique(self):
# it works!
np.unique(self.ts)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(
np.random.randn(1000, 3),
columns=["A", "B", "C"],
index=date_range("1/1/2000", periods=1000),
)
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
# .item()
with tm.assert_produces_warning(FutureWarning):
s = Series([1])
result = s.item()
assert result == 1
assert s.item() == s.iloc[0]
# using an ndarray like function
s = Series(np.random.randn(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype="float64")
tm.assert_series_equal(result, expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order="F"), s.values.ravel(order="F"))
# compress
# GH 6658
s = Series([0, 1.0, -1], index=list("abc"))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.0], index=["b"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s < -1, s)
# result empty Index(dtype=object) as the same as original
exp = Series([], dtype="float64", index=Index([], dtype="object"))
tm.assert_series_equal(result, exp)
s = Series([0, 1.0, -1], index=[0.1, 0.2, 0.3])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.0], index=[0.2]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = np.compress(s < -1, s)
# result empty Float64Index as the same as original
exp = Series([], dtype="float64", index=Index([], dtype="float64"))
tm.assert_series_equal(result, exp)
def test_str_accessor_updates_on_inplace(self):
s = pd.Series(list("abc"))
s.drop([0], inplace=True)
assert len(s.str.lower()) == 2
def test_str_attribute(self):
# GH9068
methods = ["strip", "rstrip", "lstrip"]
s = Series([" jack", "jill ", " jesse ", "frank"])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with pytest.raises(AttributeError, match="only use .str accessor"):
s.str.repeat(2)
def test_empty_method(self):
s_empty = pd.Series()
assert s_empty.empty
for full_series in [pd.Series([1]), pd.Series(index=[1])]:
assert not full_series.empty
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; s = pd.Series()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("s.", 1))
def test_integer_series_size(self):
# GH 25580
s = Series(range(9))
assert s.size == 9
s = Series(range(9), dtype="Int64")
assert s.size == 9
def test_get_values_deprecation(self):
s = Series(range(9))
with tm.assert_produces_warning(FutureWarning):
res = s.get_values()
tm.assert_numpy_array_equal(res, s.values)
class TestCategoricalSeries:
@pytest.mark.parametrize(
"method",
[
lambda x: x.cat.set_categories([1, 2, 3]),
lambda x: x.cat.reorder_categories([2, 3, 1], ordered=True),
lambda x: x.cat.rename_categories([1, 2, 3]),
lambda x: x.cat.remove_unused_categories(),
lambda x: x.cat.remove_categories([2]),
lambda x: x.cat.add_categories([4]),
lambda x: x.cat.as_ordered(),
lambda x: x.cat.as_unordered(),
],
)
def test_getname_categorical_accessor(self, method):
# GH 17509
s = Series([1, 2, 3], name="A").astype("category")
expected = "A"
result = method(s).name
assert result == expected
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
assert not s.cat.ordered, False
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
tm.assert_categorical_equal(s.values, exp)
res = s.cat.set_categories(["b", "a"])
tm.assert_categorical_equal(res.values, exp)
s[:] = "a"
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, Index(["a"]))
def test_cat_accessor_api(self):
# GH 9322
from pandas.core.arrays.categorical import CategoricalAccessor
assert Series.cat is CategoricalAccessor
s = Series(list("aabbcde")).astype("category")
assert isinstance(s.cat, CategoricalAccessor)
invalid = Series([1])
with pytest.raises(AttributeError, match="only use .cat accessor"):
invalid.cat
assert not hasattr(invalid, "cat")
def test_cat_accessor_no_new_attributes(self):
# https://github.com/pandas-dev/pandas/issues/10673
c = Series(list("aabbcde")).astype("category")
with pytest.raises(AttributeError, match="You cannot add any new attribute"):
c.cat.xlabel = "a"
def test_cat_accessor_updates_on_inplace(self):
s = Series(list("abc")).astype("category")
s.drop(0, inplace=True)
s.cat.remove_unused_categories(inplace=True)
assert len(s.cat.categories) == 2
def test_categorical_delegations(self):
# invalid accessor
msg = r"Can only use \.cat accessor with a 'category' dtype"
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat
with pytest.raises(AttributeError, match=msg):
Series([1, 2, 3]).cat()
with pytest.raises(AttributeError, match=msg):
Series(["a", "b", "c"]).cat
with pytest.raises(AttributeError, match=msg):
Series(np.arange(5.0)).cat
with pytest.raises(AttributeError, match=msg):
Series([Timestamp("20130101")]).cat
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["a", "b", "c"])
tm.assert_index_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = Index([1, 2, 3])
tm.assert_index_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype="int8")
tm.assert_series_equal(s.cat.codes, exp_codes)
assert s.cat.ordered
s = s.cat.as_unordered()
assert not s.cat.ordered
s.cat.as_ordered(inplace=True)
assert s.cat.ordered
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
s = s.cat.set_categories(["c", "b", "a"])
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"]))
exp_categories = Index(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"], dtype=np.object_)
s = s.cat.remove_unused_categories()
tm.assert_index_equal(s.cat.categories, exp_categories)
tm.assert_numpy_array_equal(s.values.__array__(), exp_values)
tm.assert_numpy_array_equal(s.__array__(), exp_values)
# This method is likely to be confused, so test that it raises an error
# on wrong inputs:
msg = "'Series' object has no attribute 'set_categories'"
with pytest.raises(AttributeError, match=msg):
s.set_categories([4, 3, 2, 1])
# right: s.cat.set_categories([4,3,2,1])
# GH18862 (let Series.cat.rename_categories take callables)
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
result = s.cat.rename_categories(lambda x: x.upper())
expected = Series(
Categorical(["A", "B", "C", "A"], categories=["A", "B", "C"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_dt_accessor_api_for_categorical(self):
# https://github.com/pandas-dev/pandas/issues/10661
from pandas.core.indexes.accessors import Properties
s_dr = Series(date_range("1/1/2015", periods=5, tz="MET"))
c_dr = s_dr.astype("category")
s_pr = Series(period_range("1/1/2015", freq="D", periods=5))
c_pr = s_pr.astype("category")
s_tdr = Series(timedelta_range("1 days", "10 days"))
c_tdr = s_tdr.astype("category")
# only testing field (like .day)
# and bool (is_month_start)
get_ops = lambda x: x._datetimelike_ops
test_data = [
("Datetime", get_ops(DatetimeIndex), s_dr, c_dr),
("Period", get_ops(PeriodArray), s_pr, c_pr),
("Timedelta", get_ops(TimedeltaIndex), s_tdr, c_tdr),
]
assert isinstance(c_dr.dt, Properties)
special_func_defs = [
("strftime", ("%Y-%m-%d",), {}),
("tz_convert", ("EST",), {}),
("round", ("D",), {}),
("floor", ("D",), {}),
("ceil", ("D",), {}),
("asfreq", ("D",), {}),
# ('tz_localize', ("UTC",), {}),
]
_special_func_names = [f[0] for f in special_func_defs]
# the series is already localized
_ignore_names = ["tz_localize", "components"]
for name, attr_names, s, c in test_data:
func_names = [
f
for f in dir(s.dt)
if not (
f.startswith("_")
or f in attr_names
or f in _special_func_names
or f in _ignore_names
)
]
func_defs = [(f, (), {}) for f in func_names]
for f_def in special_func_defs:
if f_def[0] in dir(s.dt):
func_defs.append(f_def)
for func, args, kwargs in func_defs:
with warnings.catch_warnings():
if func == "to_period":
# dropping TZ
warnings.simplefilter("ignore", UserWarning)
res = getattr(c.dt, func)(*args, **kwargs)
exp = getattr(s.dt, func)(*args, **kwargs)
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
for attr in attr_names:
try:
res = getattr(c.dt, attr)
exp = getattr(s.dt, attr)
except Exception as e:
print(name, attr)
raise e
if isinstance(res, DataFrame):
tm.assert_frame_equal(res, exp)
elif isinstance(res, Series):
tm.assert_series_equal(res, exp)
else:
tm.assert_almost_equal(res, exp)
invalid = Series([1, 2, 3]).astype("category")
msg = "Can only use .dt accessor with datetimelike"
with pytest.raises(AttributeError, match=msg):
invalid.dt
assert not hasattr(invalid, "str")
| bsd-3-clause |
juztas/fdtcp | hdfs/intervals_distribution.py | 2 | 4094 | #!/usr/bin/env python
"""
Script for plot on occurrence study of
"AlreadyBeingCreatedException" in Hadoop DFS.
#40 https://trac.hep.caltech.edu/trac/fdtcp/ticket/40
#39 https://trac.hep.caltech.edu/trac/fdtcp/ticket/39 (parent ticket)
AlreadyBeingCreated-log_file_names - list of the separate transfer log file
    names in which this exception occurred (61 cases during
    2011-04-12--06h:43m to 2011-04-14--10h:46m (~52h)),
    details in #5:comment:20
AlreadyBeingCreated-timestamps - just the extracted timestamps.
    The timestamps are the times at which the transfer was initiated
    on the fdtcp side, not exactly when the exception occurred, but
    for studying the occurrence pattern this should be accurate enough.
"""
from __future__ import print_function
from __future__ import division
from past.utils import old_div
import time
import sys
import datetime
import numpy
import pylab
from matplotlib.dates import date2num
import matplotlib as mpl
"""
help
a = [10, 20, 22, 24, 25]
b = [1.2, 1, 0.9, 1.3, 1.9]
pylab.plot(a) # generates default x data
pylab.plot(b)
pylab.plot(a, b, 'rs', a, b, 'k')
"""
BIN_SIZE = 4
# max. pause during which the exception didn't occur was over 5h, so
# make the entire period 6h (360mins)
ENTIRE_PERIOD = 360
class PlotData(object):
"""
    PlotData - time bins of BIN_SIZE minutes into which the offsets of
    exception events from the previous occurrence fall.
"""
def __init__(self):
self.timeBins = []
self.x = [] # what will be plotted - number of minutes bins on X axis
self.y = [] # what will be plotted - number of occurrences in the time bin
# make bins of BIN_SIZE up ENTIRE_PERIOD
pd = PlotData()
for i in range(BIN_SIZE, ENTIRE_PERIOD, BIN_SIZE):
hour = old_div(i, 60)
min = i - (hour * 60)
t = datetime.time(hour, min)
pd.timeBins.append(t)
pd.x.append(i)
pd.y.append(0)
# reference time for calculating time delta, time difference
refDelta = datetime.time(0, BIN_SIZE)
datetimes = [] # on x axis
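# Walk through the timestamps in chronological order, compute each occurrence's
# offset from the previous one, and count that offset into the matching time bin.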
for dt in open("AlreadyBeingCreated-timestamps", 'r'):
dt = dt.strip()
dt = dt.split('-')
dt = [int(c) for c in dt]
dObj = datetime.datetime(*dt)
delta = None
# can only calculate delta in the second iteration
if len(datetimes) != 0:
delta = dObj - previous
previous = dObj
datetimes.append(date2num(dObj))
# can't do anything on the first iteration
if not delta:
continue
# delta is in form 0:18:51.515249
sDelta = str(delta).split(':')
iDelta = [int(c) for c in (sDelta[0], sDelta[1])]
deltaMin = (60 * iDelta[0]) + iDelta[1]
for i in range(len(pd.timeBins)):
calc = abs(deltaMin - pd.x[i])
# "deltaMin in range(4/2)" makes the first bin since the subtraction
# will still be larger than the half size of the bin ...
if calc <= old_div(BIN_SIZE, 2) or deltaMin in range(old_div(BIN_SIZE, 2)):
pd.y[i] += 1
#print ("%s falls into %s (occup:%s)" % (delta, pd.x[i], pd.y[i]))
break
else:
print("not binned: %s %s" % (delta, deltaMin))
print(pd.y)
t = 0
for c in pd.y:
t += c
print ("number of total occurrences: %s (must be the same as number of "
"lines in the input file - 1)" % t)
# process result lists - consider only those which has occurrence > 0
toPlotX = []
toPlotY = []
for i in range(len(pd.y)):
if pd.y[i] > 0:
toPlotX.append(pd.x[i])
toPlotY.append(pd.y[i])
print("###### to plot:")
print(toPlotX)
print(toPlotY)
pylab.setp(
pylab.gca().get_xticklabels(),
rotation=45,
horizontalalignment='right')
pylab.plot(toPlotX, toPlotY, 'rs')
pylab.xlabel(
"%s [min] time offset bins (time from previous occurrence)" %
BIN_SIZE)
pylab.ylabel("number of occurrences with corresponding time offset")
pylab.title("AlreadyBeingCreated HDFS exceptions time offset occurrences")
pylab.grid(True)
# saves plot into a png file
# pylab.savefig('simple_plot')
#pylab.subplots_adjust(left=0.3, bottom=0.3)
# ggpylab.subplots_adjust(bottom=0.18)
pylab.show()
| apache-2.0 |
bookus/VideoClassification | VideoClassification/test/testDataSetLoader.py | 1 | 2998 | import random
from VideoClassification.utils.DataSetLoader.UCF101Loader import ChooseRandomFromSameVideo,UCF101_TwoStream,UCF101_C3D,ChooseOrderFromSameVideo
import VideoClassification.Config.Config as Config
from importlib import reload
cfsv = ChooseOrderFromSameVideo(file=Config.Code_root+'/data/testlist01.txt', dsl=UCF101_TwoStream)
cfsv2 = ChooseRandomFromSameVideo(file=Config.Code_root+'/data/testlist01.txt', dsl=UCF101_TwoStream)
cfsv3 = ChooseRandomFromSameVideo(file=Config.Code_root+'/data/testlist01.txt', dsl=UCF101_C3D)
a,b = cfsv2[0]
filelists,lbs = cfsv2[1000]
lsts = list(zip(filelists,lbs))
if True:
first_img = filelists[0]
lsts = lsts[1:]
filelists = []
tims = [ int(files[0][0][-8:-4]) for files in lsts ]
ll = sorted(list(zip(tims,filelists)))
[l[1] for l in ll]
lbs
##########################################################################################
import random
import threading
from queue import Queue
import numpy as np
import torch
from torch.autograd import Variable
from VideoClassification.utils.data_pretreatment.PipeLine import GenTensors,ImgAugPipes
from VideoClassification.utils.DataSetLoader.UCF101Loader import test_UCF0101_Spatial,test_UCF0101_Temporal,test_UCF101_C3D
import matplotlib.pyplot as plt
try:
from cv2 import cv2
except:
import cv2
batchsize=3
dsl = cfsv
itemss = random.choices(dsl,k=batchsize)
ret_imgs= []
ret_labels = []
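# Assemble a test batch: for each sampled item, augment the RGB frame together with
# its temporal frames, then stack the RGB channels with one channel taken from each
# temporal frame to form a single input array; the matching labels are collected too.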
for b in range(batchsize):
# 8x21 picture paths and 8 same labels
imgpathss,labels = itemss[b]
n = len(labels)
m = len(imgpathss[0])
tmp_ret_imgs= []
tmp_ret_labels = []
for i in range(n):
# every batch has 8~10 pictures
imgpaths = imgpathss[i]
# first img is origin image
origin_img = cv2.imread(imgpaths[0])
# the other is temporal image
temporal_imgs = []
for j in range(1,m):
temporal_imgs.append(cv2.imread(imgpaths[j]))
# OK now concate them
imgs = [origin_img] + temporal_imgs
imgs = np.array(imgs)
# use img Aug on them
# imgs.shape is (21,3,224,224)
imgs = ImgAugPipes(imgs)
# now just change it to the tensor and add to ret_imgs
temp_array = imgs[0,:,:,:]
print(temp_array.shape)
# vs = np.vstack((temp_array,)+( imgs[j,0,:,:] for j in range(1,m)))
for j in range(1,m):
t = imgs[j,0,:,:]
t = np.reshape(t,(1,224,224))
temp_array = np.vstack((temp_array,t))
tmp_ret_imgs.append(temp_array)
tmp_ret_labels.append(labels[0])
ret_imgs.append(tmp_ret_imgs)
ret_labels.append(tmp_ret_labels)
ret_imgs = np.array(ret_imgs)
ret_labels = np.array(ret_labels)
img = cv2.imread('/home/itrc/Desktop/Development/dense_flow_fbf/testfile-fbf/UCF101_images/ApplyLipstick/v_ApplyLipstick_g01_c02/image/image_0002.jpg')
img = [img]
imgs.shape
while True:
imgs = ImgAugPipes(img)
plt.imshow(imgs[0,0])
| gpl-3.0 |
cjohnson318/geostatsmodels | geostatsmodels/geoplot.py | 1 | 6299 | #!/usr/bin/env python
from pylab import *
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.patches as mpatches
from geostatsmodels import variograms, utilities
def hscattergram(data, pwdist, lag, tol):
'''
Input: (data) NumPy array with three columns, the first two
columns should be the x and y coordinates, and
third should be the measurements of the variable
of interest
(lag) the lagged distance of interest
(tol) the allowable tolerance about (lag)
(pwdist) a square pairwise distance matrix
Output: h-scattergram figure showing the distribution of
measurements taken at a certain lag and tolerance
'''
# calculate the pairwise distances
indices = variograms.lagindices(pwdist, lag, tol)
# collect the head and tail measurements
head = data[indices[:, 0], 2]
tail = data[indices[:, 1], 2]
# create a scatterplot with equal axes
fig, ax = subplots()
ax.scatter(head, tail, marker="o", facecolor="none", edgecolor="k", alpha=0.5)
ax.set_aspect("equal")
# set the labels and the title
ax.set_ylabel("$z(u+h)$")
ax.set_xlabel("$z(u)$")
ax.set_title("Lags Between " + str(lag - tol) + " and " + str(lag + tol))
# grab the limits of the axes
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
# calculate the covariance and annotate
cv = variograms.covariance(data, indices)
ax.text(xmin * 1.25, ymin * 1.050, 'Covariance = {:3.2f}'.format(cv))
# calculate the semivariance and annotate
sv = variograms.semivariance(data, indices)
ax.text(xmin * 1.25, ymin * 1.025, 'Semivariance = {:3.2f}'.format(sv))
show()
def laghistogram(data, pwdist, lags, tol):
'''
Input: (data) NumPy array with three columns, the first two
columns should be the x and y coordinates, and
third should be the measurements of the variable
of interest
(pwdist) the pairwise distances
(lags) the lagged distance of interest
(tol) the allowable tolerance about (lag)
Output: lag histogram figure showing the number of
distances at each lag
'''
    # collect the index pairs of points at each lag
indices = [variograms.lagindices(pwdist, lag, tol) for lag in lags]
# record the number of indices at each lag
indices = [len(i) for i in indices]
# create a bar plot
fig, ax = subplots()
ax.bar(lags + tol, indices)
ax.set_ylabel('Number of Lags')
ax.set_xlabel('Lag Distance')
ax.set_title('Lag Histogram')
show()
def semivariogram(data, lags, tol, model=None):
'''
Input: (data) NumPy array with three columns, the first two
columns should be the x and y coordinates, and
third should be the measurements of the variable
of interest
(lags) the lagged distance of interest
(tol) the allowable tolerance about (lag)
(model) model function taking a distance and returning
an approximation of the semivariance
Output: empirical semivariogram
'''
# h, sv = variograms.semivariogram(data, lags, tol)
vdata = variograms.semivariogram(data, lags, tol)
h, sv = vdata[0], vdata[1]
sill = np.var(data[:, 2])
fig, ax = subplots()
if model:
ax.plot(h, model(h), 'r')
ax.plot(h, sv, 'ko-')
ax.set_ylabel('Semivariance')
ax.set_xlabel('Lag Distance')
ax.set_title('Semivariogram')
ax.text(tol * 3, sill * 1.025, str(np.round(sill, decimals=3)))
ax.axhline(sill, ls='--', color='k')
show()
def anisotropiclags(data, pwdist, lag, tol, angle, atol):
'''
SPatial ANIsotropy PLOT
'''
index = variograms.lagindices(pwdist, lag, tol)
anindex = variograms.anilagindices(data, pwdist, lag, tol, angle, atol)
fig, ax = subplots()
# plot the lagged distances
for pair in index:
head, tail = data[pair]
hx, hy, hz = head
tx, ty, tz = tail
x = [hx, tx]
y = [hy, ty]
ax.plot(x, y, 'k-', lw=2, alpha=0.25)
# plot the lagged distances within
# the anisotropy angle and tolerance
for pair in anindex:
head, tail = data[pair]
hx, hy, hz = head
tx, ty, tz = tail
x = [hx, tx]
y = [hy, ty]
ax.plot(x, y, 'r-', lw=1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
def polaranisotropy(data, pwdist, lags, tol, nsectors):
angle = 360.0 / nsectors
atol = angle / 2.0
sectors = [atol + i * angle for i in range(nsectors)]
fig, ax = subplots()
cnorm = colors.Normalize(vmin=0, vmax=1)
scalarmap = cm.ScalarMappable(norm=cnorm, cmap=cm.jet)
for sector in sectors:
for lag in lags:
anisodata = (data, pwdist, lag, tol, sector, atol)
indices = variograms.anilagindices(*anisodata)
sv = variograms.semivariance(data, indices)
fc = scalarmap.to_rgba(sv)
center, r, width = (0, 0), lag, lags[0] * 2
theta1 = utilities.degree_to_bearing(sector + atol)
theta2 = utilities.degree_to_bearing(sector - atol)
wedge = mpatches.Wedge(center, r, theta1, theta2, width, color=fc)
ax.add_patch(wedge)
ax.set_xlim(-lags[-1], lags[-1])
ax.set_ylim(-lags[-1], lags[-1])
ax.set_aspect('equal')
# this is a colormap that ranges from yellow to purple to black
cdict = {'red': ((0.0, 1.0, 1.0),
(0.5, 225/255., 225/255. ),
(0.75, 0.141, 0.141 ),
(1.0, 0.0, 0.0)),
'green': ((0.0, 1.0, 1.0),
(0.5, 57/255., 57/255. ),
(0.75, 0.0, 0.0 ),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.376, 0.376),
(0.5, 198/255., 198/255. ),
(0.75, 1.0, 1.0 ),
(1.0, 0.0, 0.0)) }
YPcmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
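if __name__ == '__main__':
    # Minimal usage sketch on synthetic data (columns are x, y, value, as the
    # functions above expect); the lag spacing and tolerance are arbitrary
    # illustration values, not recommendations.
    np.random.seed(0)
    xy = np.random.uniform(0, 100, (200, 2))
    z = np.sin(xy[:, 0] / 20.0) + 0.1 * np.random.randn(200)
    data = np.column_stack((xy, z))
    lags = np.arange(10, 60, 10)
    semivariogram(data, lags, 5.0)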
| mit |
merfishtools/merfishtools-evaluation | scripts/codebook-neighbors.py | 1 | 1316 | import pandas as pd
import numpy as np
def hamming_dist(a, b):
return sum(x != y for x, y in zip(a, b))
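# e.g. hamming_dist("0101", "0011") == 2 -- codewords are compared position by position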
codebook = pd.read_table(snakemake.input[0], dtype=np.dtype(str))
full_codebook = codebook
print([(a["Codeword"], hamming_dist(a["Codeword"], "00000000010111")) for _, a in codebook.iterrows() if hamming_dist(a["Codeword"], "00000000010111") < 4])
codebook = codebook[~codebook["Gene"].str.startswith("blank") & ~codebook["Gene"].str.startswith("notarget")]
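# For each codeword, count how many codewords in the codebook (itself included)
# lie within the configured Hamming-distance threshold.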
def count_neighbors(codebook):
return pd.DataFrame({
"gene": codebook["Gene"],
"neighbors": [sum(hamming_dist(a, b) <= snakemake.params.neighbor_dist for b in codebook["Codeword"]) for a in codebook["Codeword"]]
})
neighbors = count_neighbors(codebook)
# print(count_neighbors(full_codebook))
# print(neighbors, len(neighbors), np.bincount(neighbors["neighbors"]))
neighbors.to_csv(snakemake.output[0], sep="\t", index=False)
# from bitarray import bitarray
# def environment(word, d=4):
# for i in range(len(word)):
# w = word.copy()
# w[i] ^= 1
# if d > 1:
# yield from environment(w, d=d-1)
# else:
# if w.count(True) == 4:
# yield w
#
# s = set(str(w) for w in environment(bitarray("0101100000000010"), d=4))
# print(s)
# print(len(s))
| mit |
oopsliu/Data-Cleaning-Tool | DataCleaning.py | 1 | 2687 | ## -*- coding: utf-8 -*-
##
##Data cleaning for big data tools(GeoAnalytics tools)
##Author:LIU Zheng
##email: [email protected]
##Date:2016-Nov-16
import pandas
import uuid
import datetime
import os
import glob
import gzip
import sys
# gz Path
# gzipPath = "E:\\Huawei\\"
gzipPath = sys.argv[1]
# Export result path
# resultPath = "E:\\result\\"
resultPath = sys.argv[2]
if not os.path.exists(resultPath):
os.makedirs(resultPath)
# If data has header row
# hasHeader = "T"
hasHeader = sys.argv[3]
# Print messages
def msg(message):
print (datetime.datetime.now(), ": ",message)
# Unzip temp path
tempUnzip = os.path.join(resultPath,"temp")
if not os.path.exists(tempUnzip):
os.makedirs(tempUnzip)
tempUnzipFile = os.path.join(tempUnzip,"temp.csv")
# Unzip *.gz
def inputDirectory( path ):
for fn in glob.glob( path + os.sep + '*.gz' ):
if os.path.isdir( fn ):
inputDirectory( fn )
else:
Unzip(fn)
def Unzip(path):
msg("Unzipping " + path)
global startTime
startTime = datetime.datetime.now()
f = gzip.open(path,"rb")
file_content = f.read()
ftemp = open(tempUnzipFile,"wb")
ftemp.write(file_content)
tempName = path
head, tail = os.path.split(tempName)
fileName = (tail.split(".gz")[0])
global outPath
outPath = os.path.join(resultPath,fileName)
ReadCSV(tempUnzipFile)
# Read-in csv, add header row if not exists
def ReadCSV(inputCSV):
df = pandas.read_csv(inputCSV)
if hasHeader == "T":
ProcessData(df)
elif hasHeader == "F":
msg("Adding header row")
df = pandas.read_csv(inputCSV, header=None)
df.columns = ['MmeUeS1apId', 'Latitude','Longitude','TimeStamp','LteScRSRP','LteScRSRQ','LteScTadv']
ProcessData(df)
else:
msg ("invalid parameter 'hasHeaderRow'(T/F)")
# Process data
def ProcessData(dataFrame):
df = dataFrame
# Select columns
msg("Selecting columns")
selectCols = df[['MmeUeS1apId', 'Latitude','Longitude','TimeStamp']]
# Delete rows by query condition
msg("Deleting rows with query")
removeRows = selectCols[(selectCols.Latitude != 0) & (selectCols.Longitude != 0)]
removeRows.is_copy = False
# Timestamp to datetime
msg("Converting timestamp")
removeRows['TimeStamp'] = pandas.to_datetime(removeRows['TimeStamp'], unit = 'ms')
# Add UUID
msg("Adding UUID")
for i, row in removeRows.iterrows():
removeRows.set_value(i, 'UUID',uuid.uuid4())
msg("Wrting to " + outPath)
removeRows.to_csv(outPath, sep=',', index=False)
global endTime
endTime = datetime.datetime.now()
global timeSpan
timeSpan = endTime - startTime
msg("---- Finish: " + outPath + " | This took:" + str(timeSpan.total_seconds()) + "s ----")
if __name__ == '__main__':
inputDirectory(gzipPath)
| mit |
lewislone/mStocks | gadget/sdhub/tushare/datayes/options.py | 17 | 1613 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据)
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Options():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def Opt(self, contractStatus='', optID='', secID='', ticker='', varSecID='', varticker='', field=''):
"""
        Get option contract codes, tickers, exchanges, underlyings and other related information.
"""
code, result = self.client.getData(vs.OPT%(contractStatus, optID, secID, ticker,
varSecID, varticker, field))
return _ret_data(code, result)
def OptVar(self, exchangeCD='', secID='', ticker='', contractType='', exerType='', field=''):
"""
        Get option product names, effective dates, exercise styles, settlement methods, order declaration units and other related information.
"""
code, result = self.client.getData(vs.OPTVAR%(exchangeCD, secID, ticker,
contractType, exerType, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
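if __name__ == '__main__':
    # Minimal usage sketch: it assumes a valid DataYes token has already been
    # stored where up.get_token() can find it, and queries option contracts
    # with the default (empty) filters.
    opt = Options()
    df = opt.Opt()
    if df is not None:
        print(df.head())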
| mit |
gertingold/scipy | scipy/integrate/_bvp.py | 4 | 41187 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocations residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
        1 1 2 2 0 0 0 0  5
        1 1 2 2 0 0 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 1 1 2 2 0 0  5
        0 0 0 0 1 1 2 2  5
        0 0 0 0 1 1 2 2  5

        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6
        3 3 0 0 0 0 4 4  6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
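def _jac_sparsity_demo(n=2, m=4, k=1):
    # Illustrative sketch only (not used by the solver): build a dense 0/1 mask
    # of the structurally nonzero entries for the hypothetical n=2, m=4, k=1
    # case drawn in the construct_global_jac docstring above.
    i, j = compute_jac_indices(n, m, k)
    pattern = np.zeros((n * m + k, n * m + k), dtype=int)
    pattern[i, j] = 1
    return pattern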
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies in the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
    This method belongs to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol, bc_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector or collocation residuals (values of the system lhs).
    The method alternates between full Newton iterations and fixed-Jacobian
    iterations, reusing the factorized Jacobian whenever the previous
    full-length Newton step was accepted.
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for a problems with the
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
bc_tol : float
Tolerance to which we want to satisfy the boundary conditions.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True, if the LU decomposition failed because Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance it seems reasonable to terminated Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluation and factorization, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(np.abs(bc_res) < bc_tol)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Max BC residual", "Total nodes",
"Nodes added"))
def print_iteration_progress(iteration, residual, bc_residual, total_nodes,
nodes_added):
print("{:^15}{:^15.2e}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, bc_residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system.",
3: "The solver was unable to satisfy boundary conditions tolerance on iteration 10."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as sqrt from the
normalized integrals of the squared relative residuals over each interval.
Integrals are estimated using 5-point Lobatto quadrature [1]_, we use the
fact that residuals at the mesh nodes are identically zero.
    In [2]_ the integrals are not normalized by interval lengths, which gives
    the residuals a higher rate of convergence by a factor of h**0.5. The
    normalization is done here so that the return values can be interpreted
    as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the Maltab PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
    Node removal logic is not established, and its impact on the solver is
    presumably negligible, so only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
        Intervals in each of which to insert 1 new node in the middle.
insert_2 : ndarray
        Intervals in each of which to insert 2 new nodes, dividing the
        interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
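
    Examples
    --------
    A sketch of the behaviour: with ``x = np.array([0., 1., 2., 3.])``,
    ``insert_1 = np.array([0])`` and ``insert_2 = np.array([2])`` the new mesh
    is ``[0, 0.5, 1, 2, 7/3, 8/3, 3]``.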
"""
# Because np.insert implementation apparently varies with a version of
# numpy, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(x, y, p):
return np.asarray(bc(x, y, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0, bc_tol=None):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is a n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined there must be n + k boundary conditions, i.e. bc must be
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict boundary conditions. See [2]_ for the explanation how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f)) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
bc_tol : float, optional
Desired absolute tolerance for the boundary condition residuals: `bc`
value should satisfy ``abs(bc) < bc_tol`` component-wise.
Equals to `tol` by default. Up to 10 iterations are allowed to achieve this
tolerance.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4-th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition is different by a multiplier of
h**0.5 (h is an interval length) from the definition used here.
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
           Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two found solutions. We take an advantage of having the
solution in a spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Setup the initial mesh and guess for y. We aim to find the solution for
k = 2 * pi, to achieve that we set values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
if bc_tol is None:
bc_tol = tol
# Maximum number of iterations
max_iteration = 10
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol, bc_tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
max_bc_res = np.max(abs(bc_res))
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
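        # Mesh refinement rule: intervals whose residual is moderately above
        # `tol` receive one extra node, while badly failing intervals
        # (residual >= 100 * tol) receive two.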
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, max_bc_res,
m, nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, max_bc_res, m,
nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
elif max_bc_res <= bc_tol:
status = 0
break
elif iteration >= max_iteration:
status = 3
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, x.shape[0], max_rms_res, max_bc_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
elif status == 3:
print("The solver was unable to satisfy boundary conditions "
"tolerance on iteration {}. \n"
"Maximum relative residual: {:.2e} \n"
"Maximum boundary residual: {:.2e}"
.format(iteration, max_rms_res, max_bc_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
| bsd-3-clause |
gartler/pysdr_433MHz | Pysdr_2.py | 1 | 1811 | import matplotlib.pyplot as plt
from scipy import signal as sp_sig
import numpy as np
import wave
import sys
# ################### Signal ############################
f = 'C:\\Projekte\\Wetter\\rtl-weather-6.wav'
# f = 'C:\\Projekte\\Wetter\\rtl-weather-6.wav'  # alternative capture (unused; the line below overrides it)
f = 'C:\\Projekte\\Wetter\\test_wav.wav'
signal = spf.readframes(-1)
# np.fromstring is deprecated for binary data; np.frombuffer is the supported equivalent.
signal = np.frombuffer(signal, dtype=np.int16) * -0.1
signal_raw = signal * 1.0
signal[0:4800] = 0
signal_raw[0:4800] = 0
high_index = signal>=600
low_index = signal<600
signal[high_index] = 600
signal[low_index] = 0
signal[0] = 2
signal[1] = -1
fs = spf.getframerate()
sig_start = 0
sig_end = len(signal)/fs
sig_length = len(signal)
sig_time = np.linspace(sig_start, sig_end, num = sig_length)
# ######################## Clock #########################
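# Recover the bit clock: locate rising edges of the thresholded signal, estimate
# the bit period from the first ten edge intervals, then synthesise a square-wave
# clock aligned to the first edge and blanked outside the detected burst.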
pos_t = []
pos_i = []
for i in range(sig_length-1):
if signal[i] == 0 and signal[i+1] == 600:
        pos_t.append(sig_time[i])
        pos_i.append(i)
delta = pos_t[0]
freq = 10/(pos_t[10]-pos_t[0])
duration_t = 1/freq
duration_i = int(round(duration_t*fs))
t = np.linspace(sig_start-delta, sig_end-delta, num = sig_length, endpoint=False)
#print(t)
#print(t)
#print(sig_time)
clock = sp_sig.square(2 * np.pi * freq * t)
clock = clock * 0.5 + 0.5
clock = clock *600
t = t + delta
for i in range(pos_i[0]):
    clock[i] = 0
for i in range(pos_i[-1] + int(duration_i), sig_length):
    clock[i] = 0
for i in range(sig_length-1):
    # Sample the thresholded signal on each falling edge of the recovered clock.
    # Note: after scaling, both clock and signal take the values 0 or 600, so the
    # original comparisons against 1 could never match; compare against 600 instead.
    if clock[i] == 600 and clock[i+1] == 0:
        if signal[i] == 0:
            print("1")
        elif signal[i] == 600:
            print("0")
# ######################## Plot ##########################
plt.figure(1)
plt.title('Signal Wave...')
plt.plot(sig_time,signal)
plt.plot(sig_time,signal_raw)
plt.plot(t,clock)
plt.ylim(-2000, 2000)
plt.xlim(0.04, 0.29)
plt.show()
| gpl-3.0 |
arahuja/scikit-learn | sklearn/tree/tree.py | 5 | 34304 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError, check_is_fitted
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape is used here because it preserves the data contiguity,
            # whereas indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
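                # Keep a copy of the user-visible labels: class_weight is keyed
                # on the original class labels, while y is re-encoded to 0..K-1
                # in the loop below.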
y_original = np.copy(y)
for k in range(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "auto" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
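            # A normalizer of 0 (empty weighted leaf) would produce NaNs from
            # 0/0; map it to 1 so such rows stay all-zero instead.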
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
zestrada/DECAF | decaf/plugins/keylogger/process_trace.py | 1 | 1685 | #!/usr/bin/env python
import sys
import pandas as pd
import numpy as np
#Expect input of the form:
#Process Read(0)/Write(1) vaddOfTaintedMem paddrOfTaintedMem Size
#TaintInfo CurEIP ModuleName CallerModuleName
#CallerSystemCall valueOfTaintedMem
df = pd.read_csv(sys.argv[1], delim_whitespace=True)
#We sent tainted keystrokes of 'a' and 'b'
keystrokes=['1e','30']
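# 0x1E and 0x30 are the PS/2 set-1 scancodes for 'a' and 'b', which is what the
# taint tracker observes in memory.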
#Now we want to find which different processes read the same address
#print df.eq(df['paddrOfTaintedMem'], axis='index')
#for addr, rows in df.groupby('paddrOfTaintedMem'):
min_row = len(df)  # sentinel larger than any row index; tracks the earliest row we see this at
max_procs = 0      # largest number of distinct processes seen using the same instruction (EIP)
eip = 0
# We want the smallest row number (earliest event) among the EIPs that are used
# by the maximal number of processes touching the tainted data.
for addr, rows in df.groupby('CurEIP'):
#print rows[rows['valueOfTaintedMem'].str.contains(keystrokes[0]) | \
# rows['valueOfTaintedMem'].str.contains(keystrokes[1])]
num_procs = rows['Process'].unique().size
if(num_procs >= max_procs):
max_procs = num_procs
if(rows['valueOfTaintedMem'].str.contains(keystrokes[0]).any() and
rows['valueOfTaintedMem'].str.contains(keystrokes[1]).any()):
#print addr, rows['Process'].unique()
#print rows[['CurEIP','valueOfTaintedMem']].index.min()
#Converted to a numpy array to avoid recursion depth complaints:
row_index = np.min(np.array(rows[['CurEIP','valueOfTaintedMem']].index))
if(row_index<min_row):
min_row = row_index
eip = addr
print df[df['CurEIP'].str.contains(eip)]
print "Event number: %d, Number of processes: %d eip: %s" % \
(min_row, max_procs, eip)
| gpl-3.0 |
DucQuang1/BuildingMachineLearningSystemsWithPython | ch08/corrneighbours.py | 23 | 1779 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
import numpy as np
from load_ml100k import get_train_test
from scipy.spatial import distance
from sklearn import metrics
from norm import NormalizePositive
def predict(otrain):
binary = (otrain > 0)
norm = NormalizePositive(axis=1)
train = norm.fit_transform(otrain)
dists = distance.pdist(binary, 'correlation')
dists = distance.squareform(dists)
neighbors = dists.argsort(axis=1)
filled = train.copy()
for u in range(filled.shape[0]):
# n_u are the neighbors of user
n_u = neighbors[u, 1:]
for m in range(filled.shape[1]):
            # This code could be faster using numpy indexing trickery, at the
            # cost of readability (a sketch of that idea, _predict_one_vectorized,
            # is given after this function):
revs = [train[neigh, m]
for neigh in n_u
if binary[neigh, m]]
if len(revs):
n = len(revs)
n //= 2
n += 1
revs = revs[:n]
filled[u,m] = np.mean(revs)
return norm.inverse_transform(filled)
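# Illustrative sketch (an assumption, not code from the book): the "numpy
# indexing trickery" hinted at inside predict(). It computes the filled-in
# rating for a single (user, item) pair with boolean indexing instead of a
# Python list comprehension, keeping the same "closest half of the rating
# neighbours" rule. It is not wired into predict(); it only illustrates the idea.
def _predict_one_vectorized(train, binary, neighbors, u, m):
    n_u = neighbors[u, 1:]                   # neighbours of u, closest first
    rated = n_u[binary[n_u, m]]              # those neighbours who rated item m
    if rated.size == 0:
        return train[u, m]                   # nothing to borrow: keep the value
    k = rated.size // 2 + 1                  # use only the closer half (plus one)
    return train[rated[:k], m].mean()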
def main(transpose_inputs=False):
train, test = get_train_test(random_state=12)
if transpose_inputs:
train = train.T
test = test.T
predicted = predict(train)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score (binary {} neighbours): {:.1%}'.format(
('movie' if transpose_inputs else 'user'),
r2))
if __name__ == '__main__':
main()
main(transpose_inputs=True)
| mit |
huggingface/transformers | examples/flax/language-modeling/run_clm_flax.py | 1 | 27576 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-training/Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=causal-lm
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
block_size: Optional[int] = field(
default=None,
metadata={
"help": "Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
class TrainState(train_state.TrainState):
dropout_rng: jnp.ndarray
def replicate(self):
return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if `shuffle` is `True`.
"""
steps_per_epoch = len(dataset) // batch_size
if shuffle:
batch_idx = jax.random.permutation(rng, len(dataset))
else:
batch_idx = jnp.arange(len(dataset))
batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch.
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
for idx in batch_idx:
batch = dataset[idx]
batch = {k: jnp.array(v) for k, v in batch.items()}
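        # shard() reshapes each array from (batch_size, ...) to
        # (local_device_count, batch_size // local_device_count, ...) so the
        # batch can be fed to a pmapped step function.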
batch = shard(batch)
yield batch
def write_train_metric(summary_writer, train_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
def write_eval_metric(summary_writer, eval_metrics, step):
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
def create_learning_rate_fn(
train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
"""Returns a linear warmup, linear_decay learning rate function."""
steps_per_epoch = train_ds_size // train_batch_size
num_train_steps = steps_per_epoch * num_train_epochs
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
return schedule_fn
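# Illustrative sketch (not part of the original script; the numbers below are
# made up): the schedule returned above is a plain ``step -> learning_rate``
# callable, so it can be probed or plotted directly.
def _probe_learning_rate_schedule():
    lr_fn = create_learning_rate_fn(
        train_ds_size=1000, train_batch_size=8, num_train_epochs=3,
        num_warmup_steps=50, learning_rate=5e-5)
    # 1000 // 8 = 125 steps per epoch -> 375 total steps: linear warmup to
    # step 50, then linear decay to 0 at step 375.
    return [float(lr_fn(step)) for step in (0, 25, 50, 200, 374)]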
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False
)
if "validation" not in dataset.keys():
dataset["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
dataset["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = FlaxAutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
)
else:
model = FlaxAutoModelForCausalLM.from_config(
config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
)
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = dataset["train"].column_names
else:
column_names = dataset["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
    # Since this will be pickled (to avoid a _LazyModule error in the datasets Hasher), force logger loading before tokenize_function.
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples[text_column_name])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
)
return output
tokenized_datasets = dataset.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > config.max_position_embeddings:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = lm_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = lm_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Enable tensorboard only on the master node
has_tensorboard = is_tensorboard_available()
if has_tensorboard and jax.process_index() == 0:
try:
from flax.metrics.tensorboard import SummaryWriter
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
except ImportError as ie:
has_tensorboard = False
logger.warning(
f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
)
else:
logger.warning(
"Unable to display metrics through TensorBoard because the package is not installed: "
"Please run pip install tensorboard to enable."
)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed)
rng, dropout_rng = jax.random.split(rng)
# Store some constant
num_epochs = int(training_args.num_train_epochs)
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
steps_per_epoch = len(train_dataset) // train_batch_size
total_train_steps = steps_per_epoch * num_epochs
# Create learning rate schedule
linear_decay_lr_schedule_fn = create_learning_rate_fn(
len(train_dataset),
train_batch_size,
training_args.num_train_epochs,
training_args.warmup_steps,
training_args.learning_rate,
)
# We use Optax's "masking" functionality to not apply weight decay
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
# mask boolean with the same structure as the parameters.
# The mask is True for parameters that should be decayed.
# Note that this mask is specifically adapted for FlaxGPT2.
# For other models, one should correct the layer norm parameter naming
# accordingly.
def decay_mask_fn(params):
flat_params = traverse_util.flatten_dict(params)
flat_mask = {
path: (path[-1] != "bias" and path[-2:] not in [("ln_1", "scale"), ("ln_2", "scale"), ("ln_f", "scale")])
for path in flat_params
}
return traverse_util.unflatten_dict(flat_mask)
# create adam optimizer
if training_args.adafactor:
# We use the default parameters here to initialize adafactor,
# For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
optimizer = optax.adafactor(
learning_rate=linear_decay_lr_schedule_fn,
)
else:
optimizer = optax.adamw(
learning_rate=linear_decay_lr_schedule_fn,
b1=training_args.adam_beta1,
b2=training_args.adam_beta2,
eps=training_args.adam_epsilon,
weight_decay=training_args.weight_decay,
mask=decay_mask_fn,
)
# Setup train state
state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer, dropout_rng=dropout_rng)
def loss_fn(logits, labels):
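        # Causal LM objective: the model predicts token t+1 from tokens <= t,
        # so drop the last logit and the first label before the cross-entropy.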
shift_logits = logits[..., :-1, :]
shift_labels = labels[..., 1:]
loss = optax.softmax_cross_entropy(shift_logits, onehot(shift_labels, shift_logits.shape[-1]))
return loss.mean()
# Define gradient update step fn
def train_step(state, batch):
dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
def compute_loss(params):
labels = batch.pop("labels")
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss = loss_fn(logits, labels)
return loss
grad_fn = jax.value_and_grad(compute_loss)
loss, grad = grad_fn(state.params)
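        # Average the gradients across all devices on the pmap "batch" axis so
        # every replica applies the same update.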
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return new_state, metrics
# Define eval fn
def eval_step(params, batch):
labels = batch.pop("labels")
logits = model(**batch, params=params, train=False)[0]
loss = loss_fn(logits, labels)
# summarize metrics
metrics = {"loss": loss}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return metrics
# Create parallel version of the train and eval step
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
p_eval_step = jax.pmap(eval_step, "batch")
# Replicate the train state on each device
state = state.replicate()
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_epochs}")
logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
logger.info(f" Total optimization steps = {total_train_steps}")
train_time = 0
train_metrics = []
epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
for epoch in epochs:
# ======================== Training ================================
train_start = time.time()
# Create sampling rng
rng, input_rng = jax.random.split(rng)
# Generate an epoch by shuffling sampling indices from the train dataset
train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True)
steps_per_epoch = len(train_dataset) // train_batch_size
# train
for step in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
batch = next(train_loader)
state, train_metric = p_train_step(state, batch)
train_metrics.append(train_metric)
cur_step = epoch * (len(train_dataset) // train_batch_size) + step
if cur_step % training_args.logging_steps == 0 and cur_step > 0:
# Save metrics
train_metric = unreplicate(train_metric)
train_time += time.time() - train_start
if has_tensorboard and jax.process_index() == 0:
write_train_metric(summary_writer, train_metrics, train_time, cur_step)
epochs.write(
f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
)
train_metrics = []
if cur_step % training_args.eval_steps == 0 and cur_step > 0:
# ======================== Evaluating ==============================
eval_metrics = []
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
eval_steps = len(eval_dataset) // eval_batch_size
for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
# Model forward
batch = next(eval_loader)
metrics = p_eval_step(state.params, batch)
eval_metrics.append(metrics)
# normalize eval metrics
eval_metrics = get_metrics(eval_metrics)
eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
try:
eval_metrics["perplexity"] = math.exp(eval_metrics["loss"])
except OverflowError:
eval_metrics["perplexity"] = float("inf")
# Print metrics and update progress bar
desc = f"Step... ({cur_step} | Eval Loss: {eval_metrics['loss']} | Eval Perplexity: {eval_metrics['perplexity']})"
epochs.write(desc)
epochs.desc = desc
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_eval_metric(summary_writer, eval_metrics, cur_step)
if cur_step % training_args.save_steps == 0 and cur_step > 0:
# save checkpoint after each epoch and push checkpoint to the hub
if jax.process_index() == 0:
params = jax.device_get(unreplicate(state.params))
model.save_pretrained(
training_args.output_dir,
params=params,
push_to_hub=training_args.push_to_hub,
commit_message=f"Saving weights and logs of step {cur_step}",
)
if __name__ == "__main__":
main()
| apache-2.0 |
nakul02/incubator-systemml | src/main/python/systemml/defmatrix.py | 7 | 47077 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'setSparkContext', 'matrix', 'eval', 'solve', 'DMLOp', 'set_lazy', 'debug_array_conversion', 'load', 'full', 'seq' ]
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, spmatrix
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import DataFrame, SparkSession
import pyspark.mllib.common
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from . import MLContext, pydml, _java2py, Matrix
from .converters import *
def setSparkContext(sc):
"""
Before using the matrix, the user needs to invoke this function if SparkContext is not previously created in the session.
Parameters
----------
sc: SparkContext
SparkContext
"""
matrix.sc = sc
matrix.sparkSession = SparkSession.builder.getOrCreate()
matrix.ml = MLContext(matrix.sc)
def check_MLContext():
if matrix.ml is None:
if SparkContext._active_spark_context is not None:
setSparkContext(SparkContext._active_spark_context)
else:
raise Exception('Expected setSparkContext(sc) to be called, where sc is active SparkContext.')
########################## AST related operations ##################################
class DMLOp(object):
"""
Represents an intermediate node of Abstract syntax tree created to generate the PyDML script
"""
def __init__(self, inputs, dml=None):
self.inputs = inputs
self.dml = dml
self.ID = None
self.depth = 1
for m in self.inputs:
m.referenced = m.referenced + [ self ]
if isinstance(m, matrix) and m.op is not None:
self.depth = max(self.depth, m.op.depth + 1)
MAX_DEPTH = 0
def _visit(self, execute=True):
matrix.dml = matrix.dml + self.dml
def _print_ast(self, numSpaces):
ret = []
for m in self.inputs:
ret = [ m._print_ast(numSpaces+2) ]
return ''.join(ret)
# Special object used internally to specify the placeholder which will be replaced by output ID
# This helps to provide dml containing output ID in construct_intermediate_node
OUTPUT_ID = '$$OutputID$$'
def set_lazy(isLazy):
"""
This method allows users to set whether the matrix operations should be executed in lazy manner.
Parameters
----------
isLazy: True if matrix operations should be evaluated in lazy manner.
"""
if isLazy:
DMLOp.MAX_DEPTH = 0
else:
DMLOp.MAX_DEPTH = 1
def construct_intermediate_node(inputs, dml):
"""
Convenient utility to create an intermediate node of AST.
Parameters
----------
inputs = list of input matrix objects and/or DMLOp
dml = list of DML string (which will be eventually joined before execution). To specify out.ID, please use the placeholder
"""
dmlOp = DMLOp(inputs)
out = matrix(None, op=dmlOp)
dmlOp.dml = [out.ID if x==OUTPUT_ID else x for x in dml]
if DMLOp.MAX_DEPTH > 0 and out.op.depth >= DMLOp.MAX_DEPTH:
out.eval()
return out
def load(file, format='csv'):
"""
Allows user to load a matrix from filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
return construct_intermediate_node([], [OUTPUT_ID, ' = load(\"', file, '\", format=\"', format, '\")\n'])
def full(shape, fill_value):
"""
Return a new array of given shape filled with fill_value.
Parameters
----------
shape: tuple of length 2
fill_value: float or int
"""
return construct_intermediate_node([], [OUTPUT_ID, ' = full(', str(fill_value), ', rows=', str(shape[0]), ', cols=', str(shape[1]), ')\n'])
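# Usage sketch for the constructors above (hypothetical session; assumes an active
# SparkContext `sc` has already been registered via setSparkContext(sc), and the
# file path is illustrative only):
#
#   import SystemML as sml
#   m1 = sml.load('/tmp/data.csv', format='csv')   # lazy load node, nothing runs yet
#   m2 = sml.full((3, 3), 2)                       # lazy 3x3 matrix filled with 2
#   print(m2.toNumPy())                            # triggers evaluation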
def reset():
"""
Resets the visited status of matrix and the operators in the generated AST.
"""
for m in matrix.visited:
m.visited = False
matrix.visited = []
matrix.ml = MLContext(matrix.sc)
matrix.dml = []
matrix.script = pydml('')
def perform_dfs(outputs, execute):
"""
Traverses the forest of nodes rooted at outputs nodes and returns the DML script to execute
"""
for m in outputs:
m.output = True
m._visit(execute=execute)
return ''.join(matrix.dml)
###############################################################################
########################## Utility functions ##################################
def _log_base(val, base):
if not isinstance(val, str):
raise ValueError('The val to _log_base should be of type string')
return '(log(' + val + ')/log(' + str(base) + '))'
def _matricize(lhs, inputs):
"""
Utility fn to convert the supported types to matrix class or to string (if float or int)
and return the string to be passed to DML as well as inputs
"""
if isinstance(lhs, SUPPORTED_TYPES):
lhs = matrix(lhs)
if isinstance(lhs, matrix):
lhsStr = lhs.ID
inputs = inputs + [lhs]
elif isinstance(lhs, float) or isinstance(lhs, int):
lhsStr = str(lhs)
else:
raise TypeError('Incorrect type')
return lhsStr, inputs
def binary_op(lhs, rhs, opStr):
"""
Common function called by all the binary operators in matrix class
"""
inputs = []
lhsStr, inputs = _matricize(lhs, inputs)
rhsStr, inputs = _matricize(rhs, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, opStr, rhsStr, '\n'])
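# Example of the generated PyDML for a binary op (hypothetical variable IDs):
# `m3 = m1 + m2` creates a DMLOp whose dml list is
# ['mVar3', ' = ', 'mVar1', ' + ', 'mVar2', '\n'], joined into
# "mVar3 = mVar1 + mVar2" when eval() assembles the full script.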
def binaryMatrixFunction(X, Y, fnName):
"""
Common function called by supported PyDML built-in function that has two arguments.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
rhsStr, inputs = _matricize(Y, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', fnName,'(', lhsStr, ', ', rhsStr, ')\n'])
def unaryMatrixFunction(X, fnName):
"""
Common function called by supported PyDML built-in function that has one argument.
"""
inputs = []
lhsStr, inputs = _matricize(X, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', fnName,'(', lhsStr, ')\n'])
def seq(start=None, stop=None, step=1):
"""
Creates a single column vector with values starting from <start>, to <stop>, in increments of <step>.
    Note: Unlike NumPy's arange, which returns a row vector, this returns a column vector.
    Also, unlike NumPy's arange, which does not include stop, this method includes stop in the interval.
Parameters
----------
start: int or float [Optional: default = 0]
stop: int or float
step : int float [Optional: default = 1]
"""
if start is None and stop is None:
raise ValueError('Both start and stop cannot be None')
elif start is not None and stop is None:
stop = start
start = 0
return construct_intermediate_node([], [OUTPUT_ID, ' = seq(', str(start), ',', str(stop), ',', str(step), ')\n'])
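# Example (hypothetical): seq(0, 10, 2) yields a 6x1 column vector
# [0, 2, 4, 6, 8, 10]^T -- stop is inclusive here, unlike np.arange.
#
#   v = seq(0, 10, 2)
#   v.shape      # expected: (6, 1)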
# utility function that converts 1:3 into DML string
def convert_seq_to_dml(s):
ret = []
if s is None:
return ''
elif isinstance(s, slice):
if s.step is not None:
raise ValueError('Slicing with step is not supported.')
if s.start is None:
ret = ret + [ '0 : ' ]
else:
ret = ret + [ getValue(s.start), ':' ]
        if s.stop is None:
ret = ret + [ '' ]
else:
ret = ret + [ getValue(s.stop) ]
else:
ret = ret + [ getValue(s) ]
return ''.join(ret)
# utility function that converts index (such as [1, 2:3]) into DML string
def getIndexingDML(index):
ret = [ '[' ]
if isinstance(index, tuple) and len(index) == 1:
ret = ret + [ convert_seq_to_dml(index[0]), ',' ]
elif isinstance(index, tuple) and len(index) == 2:
ret = ret + [ convert_seq_to_dml(index[0]), ',', convert_seq_to_dml(index[1]) ]
else:
raise TypeError('matrix indexes can only be tuple of length 2. For example: m[1,1], m[0:1,], m[:, 0:1]')
return ret + [ ']' ]
def convert_outputs_to_list(outputs):
if isinstance(outputs, matrix):
return [ outputs ]
elif isinstance(outputs, list):
for o in outputs:
if not isinstance(o, matrix):
raise TypeError('Only matrix or list of matrix allowed')
return outputs
else:
raise TypeError('Only matrix or list of matrix allowed')
def reset_output_flag(outputs):
for m in outputs:
m.output = False
###############################################################################
########################## Global user-facing functions #######################
def solve(A, b):
"""
Computes the least squares solution for system of linear equations A %*% x = b
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> import SystemML as sml
>>> from pyspark.sql import SparkSession
>>> diabetes = datasets.load_diabetes()
>>> diabetes_X = diabetes.data[:, np.newaxis, 2]
>>> X_train = diabetes_X[:-20]
>>> X_test = diabetes_X[-20:]
>>> y_train = diabetes.target[:-20]
>>> y_test = diabetes.target[-20:]
>>> sml.setSparkContext(sc)
>>> X = sml.matrix(X_train)
>>> y = sml.matrix(y_train)
>>> A = X.transpose().dot(X)
>>> b = X.transpose().dot(y)
>>> beta = sml.solve(A, b).toNumPy()
>>> y_predicted = X_test.dot(beta)
>>> print('Residual sum of squares: %.2f' % np.mean((y_predicted - y_test) ** 2))
Residual sum of squares: 25282.12
"""
return binaryMatrixFunction(A, b, 'solve')
def eval(outputs, execute=True):
"""
Executes the unevaluated DML script and computes the matrices specified by outputs.
Parameters
----------
outputs: list of matrices or a matrix object
    execute: specifies whether to execute the unevaluated operation or just return the script.
"""
check_MLContext()
reset()
outputs = convert_outputs_to_list(outputs)
matrix.script.setScriptString(perform_dfs(outputs, execute))
if not execute:
reset_output_flag(outputs)
return matrix.script.scriptString
results = matrix.ml.execute(matrix.script)
for m in outputs:
m.eval_data = results._java_results.get(m.ID)
reset_output_flag(outputs)
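# Usage sketch for eval (hypothetical): materialize several lazy matrices in a
# single MLContext round trip instead of calling eval() on each one separately.
#
#   m3 = m1 + m2
#   m4 = m1 * m2
#   eval([m3, m4])                     # both results computed together
#   script = eval([m3], execute=False) # only inspect the generated PyDML string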
def debug_array_conversion(throwError):
matrix.THROW_ARRAY_CONVERSION_ERROR = throwError
def _get_new_var_id():
matrix.systemmlVarID += 1
return 'mVar' + str(matrix.systemmlVarID)
###############################################################################
class matrix(object):
"""
matrix class is a python wrapper that implements basic matrix operators, matrix functions
as well as converters to common Python types (for example: Numpy arrays, PySpark DataFrame
and Pandas DataFrame).
The operators supported are:
1. Arithmetic operators: +, -, *, /, //, %, ** as well as dot (i.e. matrix multiplication)
2. Indexing in the matrix
3. Relational/Boolean operators: <, <=, >, >=, ==, !=, &, |
In addition, following functions are supported for matrix:
1. transpose
2. Aggregation functions: sum, mean, var, sd, max, min, argmin, argmax, cumsum
3. Global statistical built-In functions: exp, log, abs, sqrt, round, floor, ceil, ceiling, sin, cos, tan, asin, acos, atan, sign, solve
For all the above functions, we always return a two dimensional matrix, especially for aggregation functions with axis.
For example: Assuming m1 is a matrix of (3, n), NumPy returns a 1d vector of dimension (3,) for operation m1.sum(axis=1)
whereas SystemML returns a 2d matrix of dimension (3, 1).
Note: an evaluated matrix contains a data field computed by eval method as DataFrame or NumPy array.
Examples
--------
>>> import SystemML as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
Welcome to Apache SystemML!
>>> m1 = sml.matrix(np.ones((3,3)) + 2)
>>> m2 = sml.matrix(np.ones((3,3)) + 3)
>>> m2 = m1 * (m2 + m1)
>>> m4 = 1.0 - m2
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar1 = load(" ", format="csv")
mVar2 = load(" ", format="csv")
mVar3 = mVar2 + mVar1
mVar4 = mVar1 * mVar3
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m2.eval()
>>> m2
# This matrix (mVar4) is backed by NumPy array. To fetch the NumPy array, invoke toNumPy() method.
>>> m4
# This matrix (mVar5) is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.
mVar4 = load(" ", format="csv")
mVar5 = 1.0 - mVar4
save(mVar5, " ")
>>> m4.sum(axis=1).toNumPy()
array([[-60.],
[-60.],
[-60.]])
Design Decisions:
1. Until eval() method is invoked, we create an AST (not exposed to the user) that consist of unevaluated operations and data required by those operations.
    As an analogy, a Spark user can treat the eval() method as similar to calling RDD.persist() followed by RDD.count().
2. The AST consist of two kinds of nodes: either of type matrix or of type DMLOp.
Both these classes expose _visit method, that helps in traversing the AST in DFS manner.
3. A matrix object can either be evaluated or not.
If evaluated, the attribute 'data' is set to one of the supported types (for example: NumPy array or DataFrame). In this case, the attribute 'op' is set to None.
    If not evaluated, the attribute 'op' refers to one of the intermediate nodes of the AST and is of type DMLOp. In this case, the attribute 'data' is set to None.
4. DMLOp has an attribute 'inputs' which contains list of matrix objects or DMLOp.
5. To simplify the traversal, every matrix object is considered immutable and an matrix operations creates a new matrix object.
As an example:
`m1 = sml.matrix(np.ones((3,3)))` creates a matrix object backed by 'data=(np.ones((3,3))'.
    `m1 = m1 * 2` will create a new matrix object which is now backed by 'op=DMLOp( ... )' whose input is the earlier created matrix object.
6. Left indexing (implemented in __setitem__ method) is a special case, where Python expects the existing object to be mutated.
To ensure the above property, we make deep copy of existing object and point any references to the left-indexed matrix to the newly created object.
Then the left-indexed matrix is set to be backed by DMLOp consisting of following pydml:
left-indexed-matrix = new-deep-copied-matrix
left-indexed-matrix[index] = value
7. Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
# Global variable that is used to keep track of intermediate matrix variables in the DML script
systemmlVarID = 0
# Since joining of string is expensive operation, we collect the set of strings into list and then join
# them before execution: See matrix.script.scriptString = ''.join(matrix.dml) in eval() method
dml = []
# Represents MLContext's script object
script = None
# Represents MLContext object
ml = None
# Contains list of nodes visited in Abstract Syntax Tree. This helps to avoid computation of matrix objects
# that have been previously evaluated.
visited = []
def __init__(self, data, op=None):
"""
Constructs a lazy matrix
Parameters
----------
data: NumPy ndarray, Pandas DataFrame, scipy sparse matrix or PySpark DataFrame. (data cannot be None for external users, 'data=None' is used internally for lazy evaluation).
"""
self.dtype = np.double
check_MLContext()
self.visited = False
self.output = False
self.ID = _get_new_var_id()
self.referenced = []
# op refers to the node of Abstract Syntax Tree created internally for lazy evaluation
self.op = op
self.eval_data = data
self._shape = None
if isinstance(data, SUPPORTED_TYPES):
self._shape = data.shape
if not (isinstance(data, SUPPORTED_TYPES) or hasattr(data, '_jdf') or (data is None and op is not None)):
raise TypeError('Unsupported input type')
def eval(self):
"""
This is a convenience function that calls the global eval method
"""
eval([self])
def toPandas(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into Pandas DataFrame.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
self.eval_data = convertToPandasDF(self.eval_data)
return self.eval_data
def toNumPy(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into NumPy array.
"""
self.eval()
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
return self.eval_data
if isinstance(self.eval_data, pd.DataFrame):
self.eval_data = self.eval_data.as_matrix()
elif isinstance(self.eval_data, DataFrame):
self.eval_data = self.eval_data.toPandas().as_matrix()
elif isinstance(self.eval_data, spmatrix):
self.eval_data = self.eval_data.toarray()
elif isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toNumPy()
# Always keep default format as NumPy array if possible
return self.eval_data
def toDF(self):
"""
This is a convenience function that calls the global eval method and then converts the matrix object into DataFrame.
"""
if isinstance(self.eval_data, DataFrame):
return self.eval_data
if isinstance(self.eval_data, py4j.java_gateway.JavaObject):
self.eval_data = _java2py(SparkContext._active_spark_context, self.eval_data)
if isinstance(self.eval_data, Matrix):
self.eval_data = self.eval_data.toDF()
return self.eval_data
self.eval_data = matrix.sparkSession.createDataFrame(self.toPandas())
return self.eval_data
def save(self, file, format='csv'):
"""
Allows user to save a matrix to filesystem
Parameters
----------
file: filepath
format: can be csv, text or binary or mm
"""
tmp = construct_intermediate_node([self], ['save(', self.ID , ',\"', file, '\", format=\"', format, '\")\n'])
construct_intermediate_node([tmp], [OUTPUT_ID, ' = full(0, rows=1, cols=1)\n']).eval()
def _mark_as_visited(self):
self.visited = True
# for cleanup
matrix.visited = matrix.visited + [ self ]
return self
def _register_as_input(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = [ self.ID, ' = load(\" \", format=\"csv\")\n'] + matrix.dml
if isinstance(self.eval_data, SUPPORTED_TYPES) and execute:
matrix.script.input(self.ID, convertToMatrixBlock(matrix.sc, self.eval_data))
elif execute:
matrix.script.input(self.ID, self.toDF())
return self
def _register_as_output(self, execute):
# TODO: Remove this when automatic registration of frame is resolved
matrix.dml = matrix.dml + ['save(', self.ID, ', \" \")\n']
if execute:
matrix.script.output(self.ID)
def _visit(self, execute=True):
"""
This function is called for two scenarios:
1. For printing the PyDML script which has not yet been evaluated (execute=False). See '__repr__' method.
2. Called as part of 'eval' method (execute=True). In this scenario, it builds the PyDML script by visiting itself
and its child nodes. Also, it does appropriate registration as input or output that is required by MLContext.
"""
if self.visited:
return self
self._mark_as_visited()
if self.eval_data is not None:
self._register_as_input(execute)
elif self.op is not None:
# Traverse the AST
for m in self.op.inputs:
m._visit(execute=execute)
self.op._visit(execute=execute)
else:
raise Exception('Expected either op or data to be set')
if self.eval_data is None and self.output:
self._register_as_output(execute)
return self
def print_ast(self):
"""
Please use m.print_ast() and/or type `m` for debugging. Here is a sample session:
>>> npm = np.ones((3,3))
>>> m1 = sml.matrix(npm + 3)
>>> m2 = sml.matrix(npm + 5)
>>> m3 = m1 + m2
>>> m3
mVar2 = load(" ", format="csv")
mVar1 = load(" ", format="csv")
mVar3 = mVar1 + mVar2
save(mVar3, " ")
>>> m3.print_ast()
- [mVar3] (op).
- [mVar1] (data).
- [mVar2] (data).
"""
return self._print_ast(0)
def _print_ast(self, numSpaces):
head = ''.join([ ' ' ]*numSpaces + [ '- [', self.ID, '] ' ])
if self.eval_data is not None:
out = head + '(data).\n'
elif self.op is not None:
ret = [ head, '(op).\n' ]
for m in self.op.inputs:
ret = ret + [ m._print_ast(numSpaces + 2) ]
out = ''.join(ret)
else:
raise ValueError('Either op or data needs to be set')
if numSpaces == 0:
print(out)
else:
return out
def __repr__(self):
"""
This function helps to debug matrix class and also examine the generated PyDML script
"""
if self.eval_data is None:
print('# This matrix (' + self.ID + ') is backed by below given PyDML script (which is not yet evaluated). To fetch the data of this matrix, invoke toNumPy() or toDF() or toPandas() methods.\n' + eval([self], execute=False))
else:
print('# This matrix (' + self.ID + ') is backed by ' + str(type(self.eval_data)) + '. To fetch the DataFrame or NumPy array, invoke toDF() or toNumPy() method respectively.')
return ''
######################### NumPy related methods ######################################
__array_priority__ = 10.2
ndim = 2
THROW_ARRAY_CONVERSION_ERROR = False
def __array__(self, dtype=np.double):
"""
As per NumPy from Python,
This method is called to obtain an ndarray object when needed. You should always guarantee this returns an actual ndarray object.
Using this method, you get back a ndarray object, and subsequent operations on the returned ndarray object will be singlenode.
"""
if not isinstance(self.eval_data, SUPPORTED_TYPES):
            # Only warn if there is an unevaluated operation (which could potentially generate a large matrix) or if the data is in a non-supported single-node format
import inspect
frame,filename,line_number,function_name,lines,index = inspect.stack()[1]
msg = 'Conversion from SystemML matrix to NumPy array (occurs in ' + str(filename) + ':' + str(line_number) + ' ' + function_name + ")"
if matrix.THROW_ARRAY_CONVERSION_ERROR:
raise Exception('[ERROR]:' + msg)
else:
print('[WARN]:' + msg)
return np.array(self.toNumPy(), dtype)
def astype(self, t):
# TODO: Throw error if incorrect type
return self
def asfptype(self):
return self
def set_shape(self,shape):
raise NotImplementedError('Reshaping is not implemented')
def get_shape(self):
if self._shape is None:
lhsStr, inputs = _matricize(self, [])
rlen_ID = _get_new_var_id()
clen_ID = _get_new_var_id()
multiline_dml = [rlen_ID, ' = ', lhsStr, '.shape(0)\n']
multiline_dml = multiline_dml + [clen_ID, ' = ', lhsStr, '.shape(1)\n']
multiline_dml = multiline_dml + [OUTPUT_ID, ' = full(0, rows=2, cols=1)\n']
multiline_dml = multiline_dml + [ OUTPUT_ID, '[0,0] = ', rlen_ID, '\n' ]
multiline_dml = multiline_dml + [ OUTPUT_ID, '[1,0] = ', clen_ID, '\n' ]
ret = construct_intermediate_node(inputs, multiline_dml).toNumPy()
self._shape = tuple(np.array(ret, dtype=int).flatten())
return self._shape
shape = property(fget=get_shape, fset=set_shape)
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
"""
This function enables systemml matrix to be compatible with NumPy's ufuncs.
Parameters
----------
func: ufunc object that was called.
method: string indicating which Ufunc method was called (one of "__call__", "reduce", "reduceat", "accumulate", "outer", "inner").
pos: index of self in inputs.
inputs: tuple of the input arguments to the ufunc
kwargs: dictionary containing the optional input arguments of the ufunc.
"""
if method != '__call__' or kwargs:
return NotImplemented
if func in matrix._numpy_to_systeml_mapping:
fn = matrix._numpy_to_systeml_mapping[func]
else:
return NotImplemented
if len(inputs) == 2:
return fn(inputs[0], inputs[1])
elif len(inputs) == 1:
return fn(inputs[0])
else:
raise ValueError('Unsupported number of inputs')
def hstack(self, other):
"""
Stack matrices horizontally (column wise). Invokes cbind internally.
"""
return binaryMatrixFunction(self, other, 'cbind')
def vstack(self, other):
"""
Stack matrices vertically (row wise). Invokes rbind internally.
"""
return binaryMatrixFunction(self, other, 'rbind')
######################### Arithmetic operators ######################################
def negative(self):
lhsStr, inputs = _matricize(self, [])
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = -', lhsStr, '\n'])
def remainder(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])
def ldexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '* (2**', rhsStr, ')\n'])
def mod(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, ' - floor(', lhsStr, '/', rhsStr, ') * ', rhsStr, '\n'])
def logaddexp(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = log(exp(', lhsStr, ') + exp(', rhsStr, '))\n'])
def logaddexp2(self, other):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rhsStr, inputs = _matricize(other, inputs)
        opStr = _log_base('2**' + lhsStr + ' + 2**' + rhsStr, 2)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', opStr, '\n'])
def log1p(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = log(1 + ', lhsStr, ')\n'])
def exp(self):
return unaryMatrixFunction(self, 'exp')
def exp2(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = 2**', lhsStr, '\n'])
def square(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '**2\n'])
def reciprocal(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = 1/', lhsStr, '\n'])
def expm1(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = exp(', lhsStr, ') - 1\n'])
def ones_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = full(1, rows=', rlen, ', cols=', clen, ')\n'])
def zeros_like(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
rlen = lhsStr + '.shape(axis=0)'
clen = lhsStr + '.shape(axis=1)'
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = full(0, rows=', rlen, ', cols=', clen, ')\n'])
def log2(self):
return self.log(2)
def log10(self):
return self.log(10)
def log(self, y=None):
if y is None:
return unaryMatrixFunction(self, 'log')
else:
return binaryMatrixFunction(self, y, 'log')
def abs(self):
return unaryMatrixFunction(self, 'abs')
def sqrt(self):
return unaryMatrixFunction(self, 'sqrt')
def round(self):
return unaryMatrixFunction(self, 'round')
def floor(self):
return unaryMatrixFunction(self, 'floor')
def ceil(self):
return unaryMatrixFunction(self, 'ceil')
def ceiling(self):
return unaryMatrixFunction(self, 'ceiling')
def sin(self):
return unaryMatrixFunction(self, 'sin')
def cos(self):
return unaryMatrixFunction(self, 'cos')
def tan(self):
return unaryMatrixFunction(self, 'tan')
def sinh(self):
return unaryMatrixFunction(self, 'sinh')
def cosh(self):
return unaryMatrixFunction(self, 'cosh')
def tanh(self):
return unaryMatrixFunction(self, 'tanh')
def arcsin(self):
return self.asin()
def arccos(self):
return self.acos()
def arctan(self):
return self.atan()
def asin(self):
return unaryMatrixFunction(self, 'asin')
def acos(self):
return unaryMatrixFunction(self, 'acos')
def atan(self):
return unaryMatrixFunction(self, 'atan')
def rad2deg(self):
"""
Convert angles from radians to degrees.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# 180/pi = 57.2957795131
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '*57.2957795131\n'])
def deg2rad(self):
"""
Convert angles from degrees to radians.
"""
inputs = []
lhsStr, inputs = _matricize(self, inputs)
# pi/180 = 0.01745329251
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = ', lhsStr, '*0.01745329251\n'])
def sign(self):
return unaryMatrixFunction(self, 'sign')
def __add__(self, other):
return binary_op(self, other, ' + ')
def __sub__(self, other):
return binary_op(self, other, ' - ')
def __mul__(self, other):
return binary_op(self, other, ' * ')
def __floordiv__(self, other):
return binary_op(self, other, ' // ')
def __div__(self, other):
"""
Performs division (Python 2 way).
"""
return binary_op(self, other, ' / ')
def __truediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(self, other, ' / ')
def __mod__(self, other):
return binary_op(self, other, ' % ')
def __pow__(self, other):
return binary_op(self, other, ' ** ')
def __radd__(self, other):
return binary_op(other, self, ' + ')
def __rsub__(self, other):
return binary_op(other, self, ' - ')
def __rmul__(self, other):
return binary_op(other, self, ' * ')
def __rfloordiv__(self, other):
return binary_op(other, self, ' // ')
def __rdiv__(self, other):
return binary_op(other, self, ' / ')
def __rtruediv__(self, other):
"""
Performs division (Python 3 way).
"""
return binary_op(other, self, ' / ')
def __rmod__(self, other):
return binary_op(other, self, ' % ')
def __rpow__(self, other):
return binary_op(other, self, ' ** ')
def dot(self, other):
"""
Numpy way of performing matrix multiplication
"""
return binaryMatrixFunction(self, other, 'dot')
def __matmul__(self, other):
"""
Performs matrix multiplication (infix operator: @). See PEP 465)
"""
return binaryMatrixFunction(self, other, 'dot')
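    # Usage sketch (hypothetical): both spellings build the same lazy 'dot' node.
    #
    #   C = A.dot(B)
    #   C = A @ B      # Python 3.5+ infix form (PEP 465)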
######################### Relational/Boolean operators ######################################
def __lt__(self, other):
return binary_op(self, other, ' < ')
def __le__(self, other):
return binary_op(self, other, ' <= ')
def __gt__(self, other):
return binary_op(self, other, ' > ')
def __ge__(self, other):
return binary_op(self, other,' >= ')
def __eq__(self, other):
return binary_op(self, other, ' == ')
def __ne__(self, other):
return binary_op(self, other, ' != ')
# TODO: Cast the output back into scalar and return boolean results
def __and__(self, other):
return binary_op(other, self, ' & ')
def __or__(self, other):
return binary_op(other, self, ' | ')
def logical_not(self):
inputs = []
lhsStr, inputs = _matricize(self, inputs)
return construct_intermediate_node(inputs, [OUTPUT_ID, ' = !', lhsStr, '\n'])
def remove_empty(self, axis=None):
"""
Removes all empty rows or columns from the input matrix target X according to specified axis.
Parameters
----------
axis : int (0 or 1)
"""
if axis is None:
raise ValueError('axis is a mandatory argument for remove_empty')
if axis == 0:
            return self._parameterized_helper_fn('removeEmpty', target=self, margin='rows')
elif axis == 1:
            return self._parameterized_helper_fn('removeEmpty', target=self, margin='cols')
else:
raise ValueError('axis for remove_empty needs to be either 0 or 1.')
def replace(self, pattern=None, replacement=None):
"""
        Replaces all occurrences of the given pattern in the matrix with the given replacement value.
Parameters
----------
pattern : float or int
replacement : float or int
"""
if pattern is None or not isinstance(pattern, (float, int)):
raise ValueError('pattern should be of type float or int')
if replacement is None or not isinstance(replacement, (float, int)):
raise ValueError('replacement should be of type float or int')
        return self._parameterized_helper_fn('replace', target=self, pattern=pattern, replacement=replacement)
def _parameterized_helper_fn(self, fnName, **kwargs):
"""
Helper to invoke parameterized builtin function
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr ]
first_arg = True
for key in kwargs:
if first_arg:
first_arg = False
else:
dml_script = dml_script + [ ', ' ]
v = kwargs[key]
if isinstance(v, str):
dml_script = dml_script + [key, '=\"', v, '\"' ]
elif isinstance(v, matrix):
dml_script = dml_script + [key, '=', v.ID]
else:
dml_script = dml_script + [key, '=', str(v) ]
dml_script = dml_script + [ ')\n' ]
return construct_intermediate_node(inputs, dml_script)
######################### Aggregation functions ######################################
def prod(self):
"""
Return the product of all cells in matrix
"""
return self._aggFn('prod', None)
def sum(self, axis=None):
"""
Compute the sum along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sum', axis)
def mean(self, axis=None):
"""
Compute the arithmetic mean along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('mean', axis)
def var(self, axis=None):
"""
Compute the variance along the specified axis.
We assume that delta degree of freedom is 1 (unlike NumPy which assumes ddof=0).
Parameters
----------
axis : int, optional
"""
return self._aggFn('var', axis)
def moment(self, moment=1, axis=None):
"""
Calculates the nth moment about the mean
Parameters
----------
moment : int
can be 1, 2, 3 or 4
axis : int, optional
"""
if moment == 1:
return self.mean(axis)
elif moment == 2:
return self.var(axis)
elif moment == 3 or moment == 4:
return self._moment_helper(moment, axis)
else:
raise ValueError('The specified moment is not supported:' + str(moment))
def _moment_helper(self, k, axis=0):
dml_script = ''
lhsStr, inputs = _matricize(self, [])
        dml_script = [OUTPUT_ID, ' = moment(', lhsStr, ', ', str(k), ')\n' ]
if axis is None:
dml_script = [OUTPUT_ID, ' = moment(full(', lhsStr, ', rows=length(', lhsStr, '), cols=1), ', str(k), ')\n' ]
elif axis == 0:
dml_script = [OUTPUT_ID, ' = full(0, rows=nrow(', lhsStr, '), cols=1)\n' ]
dml_script = dml_script + [ 'parfor(i in 1:nrow(', lhsStr, '), check=0):\n' ]
dml_script = dml_script + [ '\t', OUTPUT_ID, '[i-1, 0] = moment(full(', lhsStr, '[i-1,], rows=ncol(', lhsStr, '), cols=1), ', str(k), ')\n\n' ]
elif axis == 1:
dml_script = [OUTPUT_ID, ' = full(0, rows=1, cols=ncol(', lhsStr, '))\n' ]
dml_script = dml_script + [ 'parfor(i in 1:ncol(', lhsStr, '), check=0):\n' ]
dml_script = dml_script + [ '\t', OUTPUT_ID, '[0, i-1] = moment(', lhsStr, '[,i-1], ', str(k), ')\n\n' ]
else:
            raise ValueError('Incorrect axis:' + str(axis))
return construct_intermediate_node(inputs, dml_script)
def sd(self, axis=None):
"""
Compute the standard deviation along the specified axis
Parameters
----------
axis : int, optional
"""
return self._aggFn('sd', axis)
def max(self, other=None, axis=None):
"""
Compute the maximum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
raise ValueError('Both axis and other cannot be not None')
elif other is None and axis is not None:
return self._aggFn('max', axis)
else:
return binaryMatrixFunction(self, other, 'max')
def min(self, other=None, axis=None):
"""
Compute the minimum value along the specified axis
Parameters
----------
other: matrix or numpy array (& other supported types) or scalar
axis : int, optional
"""
if other is not None and axis is not None:
raise ValueError('Both axis and other cannot be not None')
elif other is None and axis is not None:
return self._aggFn('min', axis)
else:
return binaryMatrixFunction(self, other, 'min')
def argmin(self, axis=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
        axis : int, optional (only axis=1, i.e. rowIndexMin, is supported in this version)
"""
return self._aggFn('argmin', axis)
def argmax(self, axis=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
axis : int, optional (only axis=1, i.e. rowIndexMax is supported in this version)
"""
return self._aggFn('argmax', axis)
def cumsum(self, axis=None):
"""
        Returns the cumulative sum of the matrix along the given axis.
Parameters
----------
axis : int, optional (only axis=0, i.e. cumsum along the rows is supported in this version)
"""
return self._aggFn('cumsum', axis)
def transpose(self):
"""
Transposes the matrix.
"""
return self._aggFn('transpose', None)
def trace(self):
"""
        Return the sum of the cells on the main diagonal of a square matrix
"""
return self._aggFn('trace', None)
def _aggFn(self, fnName, axis):
"""
Common function that is called for functions that have axis as parameter.
"""
dml_script = ''
lhsStr, inputs = _matricize(self, [])
if axis is None:
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ')\n']
else:
dml_script = [OUTPUT_ID, ' = ', fnName, '(', lhsStr, ', axis=', str(axis) ,')\n']
return construct_intermediate_node(inputs, dml_script)
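    # Aggregation usage sketch (hypothetical): axis=None reduces over the whole
    # matrix, while axis=0/1 keep a 2d shape (unlike NumPy's 1d results).
    #
    #   m = matrix(np.ones((3, 4)))
    #   m.sum().toNumPy()        # expected: 1x1 result with value 12.0
    #   m.sum(axis=1).toNumPy()  # expected: shape (3, 1), each row sums to 4.0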
######################### Indexing operators ######################################
def __getitem__(self, index):
"""
Implements evaluation of right indexing operations such as m[1,1], m[0:1,], m[:, 0:1]
"""
return construct_intermediate_node([self], [OUTPUT_ID, ' = ', self.ID ] + getIndexingDML(index) + [ '\n' ])
# Performs deep copy if the matrix is backed by data
def _prepareForInPlaceUpdate(self):
temp = matrix(self.eval_data, op=self.op)
for op in self.referenced:
op.inputs = [temp if x.ID==self.ID else x for x in op.inputs]
self.ID, temp.ID = temp.ID, self.ID # Copy even the IDs as the IDs might be used to create DML
self.op = DMLOp([temp], dml=[self.ID, " = ", temp.ID])
self.eval_data = None
temp.referenced = self.referenced + [ self.op ]
self.referenced = []
def __setitem__(self, index, value):
"""
Implements evaluation of left indexing operations such as m[1,1]=2
"""
self._prepareForInPlaceUpdate()
if isinstance(value, matrix) or isinstance(value, DMLOp):
self.op.inputs = self.op.inputs + [ value ]
if isinstance(value, matrix):
value.referenced = value.referenced + [ self.op ]
self.op.dml = self.op.dml + [ '\n', self.ID ] + getIndexingDML(index) + [ ' = ', getValue(value), '\n']
# Not implemented: conj, hyperbolic/inverse-hyperbolic functions(i.e. sinh, arcsinh, cosh, ...), bitwise operator, xor operator, isreal, iscomplex, isfinite, isinf, isnan, copysign, nextafter, modf, frexp, trunc
_numpy_to_systeml_mapping = {np.add: __add__, np.subtract: __sub__, np.multiply: __mul__, np.divide: __div__, np.logaddexp: logaddexp, np.true_divide: __truediv__, np.floor_divide: __floordiv__, np.negative: negative, np.power: __pow__, np.remainder: remainder, np.mod: mod, np.fmod: __mod__, np.absolute: abs, np.rint: round, np.sign: sign, np.exp: exp, np.exp2: exp2, np.log: log, np.log2: log2, np.log10: log10, np.expm1: expm1, np.log1p: log1p, np.sqrt: sqrt, np.square: square, np.reciprocal: reciprocal, np.ones_like: ones_like, np.zeros_like: zeros_like, np.sin: sin, np.cos: cos, np.tan: tan, np.arcsin: arcsin, np.arccos: arccos, np.arctan: arctan, np.deg2rad: deg2rad, np.rad2deg: rad2deg, np.greater: __gt__, np.greater_equal: __ge__, np.less: __lt__, np.less_equal: __le__, np.not_equal: __ne__, np.equal: __eq__, np.logical_not: logical_not, np.logical_and: __and__, np.logical_or: __or__, np.maximum: max, np.minimum: min, np.signbit: sign, np.ldexp: ldexp, np.dot:dot}
| apache-2.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/neighbors/approximate.py | 30 | 22370 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
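# Worked example (illustrative, 4-bit hashes instead of 32-bit): for a sorted tree
# [0b0010, 0b0101, 0b0110, 0b1100] and a query hashing to 0b0100, prefixes of
# length 1..3 still match at least one stored hash (0b0101 shares '010'), while no
# entry matches all 4 bits, so the binary search above returns a depth of 3.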
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
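    # Illustrative note: with 32 projected components per sample, the sign pattern
    # (projected > 0) becomes a 32-bit string, np.packbits packs it into 4 bytes,
    # and the big-endian '>u4' view turns those bytes into a single uint32 hash
    # per sample, which is what the LSH trees store and search.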
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
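        # Illustration (4-bit toy example): for prefix length 2 the left mask is
        # 1100 and the right mask is 0011, so `hash & left_mask` is the smallest
        # value sharing the prefix and `hash | right_mask` is the largest, which
        # brackets the contiguous block of matching entries in a sorted tree.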
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
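# Minimal usage sketch for the estimator above (hedged: it assumes the class is
# exposed as sklearn.neighbors.LSHForest and that ``n_estimators`` is a constructor
# argument; everything else follows the fit/kneighbors/radius_neighbors/partial_fit
# methods shown here):
#     import numpy as np
#     from sklearn.neighbors import LSHForest
#     rng = np.random.RandomState(42)
#     lshf = LSHForest(n_estimators=10).fit(rng.rand(100, 16))
#     dist, ind = lshf.kneighbors(rng.rand(3, 16), n_neighbors=5,
#                                 return_distance=True)    # approximate cosine distances
#     dist_r, ind_r = lshf.radius_neighbors(rng.rand(3, 16), radius=0.4)
#     lshf = lshf.partial_fit(rng.rand(20, 16))             # insert new points as one batch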
| gpl-2.0 |
draperjames/bokeh | examples/app/movies/main.py | 13 | 4260 | from os.path import dirname, join
import numpy as np
import pandas.io.sql as psql
import sqlite3 as sql
from bokeh.plotting import figure
from bokeh.layouts import layout, widgetbox
from bokeh.models import ColumnDataSource, HoverTool, Div
from bokeh.models.widgets import Slider, Select, TextInput
from bokeh.io import curdoc
from bokeh.sampledata.movies_data import movie_path
conn = sql.connect(movie_path)
query = open(join(dirname(__file__), 'query.sql')).read()
movies = psql.read_sql(query, conn)
movies["color"] = np.where(movies["Oscars"] > 0, "orange", "grey")
movies["alpha"] = np.where(movies["Oscars"] > 0, 0.9, 0.25)
movies.fillna(0, inplace=True) # just replace missing values with zero
movies["revenue"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))
with open(join(dirname(__file__), "razzies-clean.csv")) as f:
razzies = f.read().splitlines()
movies.loc[movies.imdbID.isin(razzies), "color"] = "purple"
movies.loc[movies.imdbID.isin(razzies), "alpha"] = 0.9
axis_map = {
"Tomato Meter": "Meter",
"Numeric Rating": "numericRating",
"Number of Reviews": "Reviews",
"Box Office (dollars)": "BoxOffice",
"Length (minutes)": "Runtime",
"Year": "Year",
}
desc = Div(text=open(join(dirname(__file__), "description.html")).read(), width=800)
# Create Input controls
reviews = Slider(title="Minimum number of reviews", value=80, start=10, end=300, step=10)
min_year = Slider(title="Year released", start=1940, end=2014, value=1970, step=1)
max_year = Slider(title="End Year released", start=1940, end=2014, value=2014, step=1)
oscars = Slider(title="Minimum number of Oscar wins", start=0, end=4, value=0, step=1)
boxoffice = Slider(title="Dollars at Box Office (millions)", start=0, end=800, value=0, step=1)
genre = Select(title="Genre", value="All",
options=open(join(dirname(__file__), 'genres.txt')).read().split())
director = TextInput(title="Director name contains")
cast = TextInput(title="Cast names contains")
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()), value="Tomato Meter")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()), value="Number of Reviews")
# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))
hover = HoverTool(tooltips=[
("Title", "@title"),
("Year", "@year"),
("$", "@revenue")
])
p = figure(plot_height=600, plot_width=700, title="", toolbar_location=None, tools=[hover])
p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
def select_movies():
genre_val = genre.value
director_val = director.value.strip()
cast_val = cast.value.strip()
selected = movies[
(movies.Reviews >= reviews.value) &
(movies.BoxOffice >= (boxoffice.value * 1e6)) &
(movies.Year >= min_year.value) &
(movies.Year <= max_year.value) &
(movies.Oscars >= oscars.value)
]
if (genre_val != "All"):
selected = selected[selected.Genre.str.contains(genre_val)==True]
if (director_val != ""):
selected = selected[selected.Director.str.contains(director_val)==True]
if (cast_val != ""):
selected = selected[selected.Cast.str.contains(cast_val)==True]
return selected
def update():
df = select_movies()
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
p.xaxis.axis_label = x_axis.value
p.yaxis.axis_label = y_axis.value
p.title.text = "%d movies selected" % len(df)
source.data = dict(
x=df[x_name],
y=df[y_name],
color=df["color"],
title=df["Title"],
year=df["Year"],
revenue=df["revenue"],
alpha=df["alpha"],
)
controls = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]
for control in controls:
control.on_change('value', lambda attr, old, new: update())
sizing_mode = 'fixed' # 'scale_width' also looks nice with this example
inputs = widgetbox(*controls, sizing_mode=sizing_mode)
l = layout([
[desc],
[inputs, p],
], sizing_mode=sizing_mode)
update() # initial load of the data
curdoc().add_root(l)
curdoc().title = "Movies"
| bsd-3-clause |
Myasuka/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
marcsans/cf-cold-start | src/utils.py | 1 | 2279 | from sklearn.utils import as_float_array
from scipy.sparse import csr_matrix
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
def sparse_matrix_input(X):
X = as_float_array(X)[:,:3]
return X
def sparse_matrix(X,n,p,w=1,names=['row','col','val']):
#X = sparse_matrix_input(X)
R = csr_matrix((w*X[names[2]], (X[names[0]],X[names[1]])), shape=(n,p))
return R
def RMSE(R_true,R_pred,W=None):
if W is None:
W = R_true.nonzero()
if 'sparse' in str(type(R_true)):
return mean_squared_error(np.array(R_true[W])[0],R_pred[W])**.5
else:
return mean_squared_error(R_true[W],R_pred[W])**.5
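# Illustrative usage sketch for sparse_matrix/RMSE (hypothetical data; column
# names follow the default ``names=['row','col','val']`` above):
#     import numpy as np
#     import pandas as pd
#     ratings = pd.DataFrame({'row': [0, 0, 1], 'col': [0, 2, 1], 'val': [4., 3., 5.]})
#     R_true = sparse_matrix(ratings, n=2, p=3)   # 2x3 CSR matrix of observed ratings
#     R_pred = np.full((2, 3), 3.5)               # dense predictions
#     err = RMSE(R_true, R_pred)                  # RMSE over the observed entries only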
def sparseMatrix(data,k,n,p,include=False,names=['user','item','rating']):
# if include = True we take only cv=k, ortherwise we only exclude cv=k
if include:
R = csr_matrix((data[names[2]][data['cv']==k],
(data[names[0]][data['cv']==k],
data[names[1]][data['cv']==k])),
shape=(n,p))
else:
R = csr_matrix((data[names[2]][data['cv']!=k],
(data[names[0]][data['cv']!=k],
data[names[1]][data['cv']!=k])),
shape=(n,p))
return R
# to delete
def getLine(perf,model,param,cv):
try: # if the line exists
out = perf[(perf['model']==model) & (perf['params']==param) &
(perf['crossval']==cv)].index[0]
except IndexError: # create a new line
try: # the dataset is not empty
out = max(perf.index)+1
except ValueError:
out = 0
return out
def getLine_fromdict(perf,Dict):
tmp = pd.DataFrame(columns=list(Dict.keys()))
tmp.loc[0,list(Dict.keys())] = list(Dict.values())
tmp = pd.merge(perf,tmp)
try: # the dataset is not empty
out = max(perf.index)+1
except ValueError:
out = 0
return out
def extract_year(x):
try:
return int(x.split('-')[2])
except AttributeError:
return -1
def argmax(x):
p = np.argwhere(x==np.max(x))
return p[np.random.randint(len(p))][0]
def argmin(x):
p = np.argwhere(x==np.min(x))
    return p[np.random.randint(len(p))][0]
| mit |
JCHappytime/MyQuantopian | yahoo.py | 3 | 3419 | #!/usr/bin/env python
LICENSE="""
Copyright (C) 2011 Michael Ihde
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import os
import datetime
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.dates as dates
from pycommando.commando import command
from utils.YahooQuote import *
@command("db_ls")
def list():
"""
Lists the symbols that are in the database
"""
return Market().cache.symbols()
@command("db_up")
def update(symbol):
"""
Updates the historical daily prices for all stocks
currently in the database.
"""
market = Market()
try:
ticker = market[symbol]
if ticker != None:
ticker.updateHistory()
except IndexError:
market.updateHistory()
@command("db_flush")
def flush():
"""
Completely removes the yahoo cache
"""
os.remove(YahooQuote.CACHE)
Market()._dbInit()
@command("db_load")
def load(symbol=None):
"""
Load's historical prices for a given ticker or index
symbol from 1950 until today. This may take a long time,
especially if you don't provide a symbol because it will
cache all major indexes from 1950 until today.
"""
market = Market()
if symbol == None:
market.fetchHistory()
else:
ticker = market[symbol]
if ticker != None:
ticker.fetchHistory()
@command("db_fetch")
def fetch(symbol, start="today", end="today"):
"""
Prints the daily price for the stock on a given day.
"""
if start.upper() == "TODAY":
day_start = datetime.date.today()
else:
day_start = datetime.datetime.strptime(start, "%Y-%m-%d")
day_start = (day_start.year * 10000) + (day_start.month * 100) + day_start.day
if end.upper() == "TODAY":
day_end = None
else:
day_end = datetime.datetime.strptime(end, "%Y-%m-%d")
day_end = (day_end.year * 10000) + (day_end.month * 100) + day_end.day
ticker = Market()[symbol]
if ticker != None:
if day_end == None:
return ticker[day_start]
else:
return ticker[day_start:day_end]
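# Note on the date keys used above: dates are collapsed to integers of the form
# YYYYMMDD, e.g. "2011-03-05" -> 2011*10000 + 3*100 + 5 = 20110305, so a ranged
# fetch is simply a slice between two such keys (symbol below is illustrative):
#     fetch("SPY", start="2011-03-01", end="2011-03-05")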
@command("db_plot")
def plot(symbol, start, end):
"""
    Plots the daily prices of the stock over the given date range.
"""
quotes = fetch(symbol, start, end)
x_data = [QuoteDate(q.date).toDateTime() for q in quotes]
y_data = [q.adjclose for q in quotes]
fig = plt.figure()
fig.canvas.set_window_title("%s %s-%s" % (symbol, start, end))
sp = fig.add_subplot(111)
sp.plot(x_data, y_data, '-')
x_locator = dates.AutoDateLocator()
sp.xaxis.set_major_locator(x_locator)
sp.xaxis.set_major_formatter(dates.AutoDateFormatter(x_locator))
fig.autofmt_xdate()
fig.show()
| gpl-2.0 |
chemreac/chemreac | examples/robertson.py | 2 | 3609 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The fruit fly of stiff numerical chemical kinetics problems.
$ python robertson.py -A 1.0 -B 1e-20 -C 1e-20 --t0 0 --plot --tend 3e10 --nt \
1024 --logt --logy --verbose
"""
from __future__ import (absolute_import, division, print_function)
from math import log10
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.chemistry import Reaction, ReactionSystem
from chemreac.integrate import run
from chemreac.util.analysis import suggest_t0
from chemreac.util.plotting import (
plot_C_vs_t, save_and_or_show_plot, plot_faded_time
)
def get_reactions(rates):
"""
A -> B
B + C -> A + C
    B + B -> B + C
"""
return (
Reaction({'A': 1}, {'B': 1}, rates[0]),
Reaction({'B': 1, 'C': 1}, {'A': 1, 'C': 1}, rates[1]),
Reaction({'B': 2}, {'B': 1, 'C': 1}, rates[2])
)
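# Under mass-action kinetics the three reactions above give the classic
# Robertson ODE system (with k1, k2, k3 as passed to integrate_rd below):
#     dA/dt = -k1*A + k2*B*C
#     dB/dt =  k1*A - k2*B*C - k3*B**2
#     dC/dt =  k3*B**2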
def integrate_rd(tend=1e2, A0=1.0, B0=0.0, C0=0.0, k1=0.04, k2=1e4, k3=3e7,
t0=1e2, nt=100, N=1, nstencil=3, logt=False, logy=False,
plot=False, savefig='None', verbose=False, dump_expr='False',
use_chempy=False, D=2e-3):
if N == 1:
init_conc = (A0, B0, C0)
else:
init_conc = np.tile((A0, B0, C0), (N, 1))
init_conc /= np.linspace(1, 2, N).reshape((N, 1))**.5
rsys = ReactionSystem(get_reactions((k1, k2, k3)), 'ABC')
if verbose:
print([str(_) for _ in rsys.rxns])
if use_chempy:
from chempy.kinetics.ode import get_odesys
odesys = get_odesys(rsys, include_params=True)
if N != 1:
raise ValueError("ChemPy does not support diffusion")
odesys.integrate(np.logspace(log10(t0), log10(tend)), init_conc)
if plot:
odesys.plot_result(xscale='log', yscale='log')
result = None
else:
rd = ReactionDiffusion.from_ReactionSystem(
rsys, N=N, nstencil=1 if N == 1 else nstencil, logt=logt,
logy=logy, D=[D/2, D/3, D/5])
if dump_expr.lower() not in ('false', '0'):
from chemreac.symbolic import SymRD
import sympy as sp
cb = {'latex': sp.latex,
'ccode': sp.ccode}.get(dump_expr.lower(), str)
srd = SymRD.from_rd(rd, k=sp.symbols('k:3'))
print('dydx:')
print('\n'.join(map(cb, srd._f)))
print('jac:')
for ri, row in enumerate(srd.jacobian.tolist()):
for ci, expr in enumerate(row):
if expr == 0:
continue
print(ri, ci, cb(expr))
return None
if t0 == 0 and logt:
t0 = 1e-3*suggest_t0(rd, init_conc)
if verbose:
print("Using t0 = %12.5g" % t0)
t = np.logspace(np.log10(t0), np.log10(tend), nt)
print(t[0], t[-1])
integr = run(rd, init_conc, t)
if verbose:
import pprint
pprint.pprint(integr.info)
if plot:
if N == 1:
plot_C_vs_t(integr, xscale='log', yscale='log')
else:
import matplotlib.pyplot as plt
for idx, name in enumerate('ABC', 1):
plt.subplot(1, 3, idx)
rgb = [.5, .5, .5]
rgb[idx-1] = 1
plot_faded_time(integr, name, rgb=rgb, log_color=True)
result = integr
if plot:
save_and_or_show_plot(savefig=savefig)
return result
if __name__ == '__main__':
import argh
argh.dispatch_command(integrate_rd)
| bsd-2-clause |
springcoil/Data-Science-45min-Intros | support-vector-machines-101/svm-example.py | 26 | 2219 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
# cobbled together from refs:
# http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html
# http://scikit-learn.org/stable/auto_examples/svm/plot_separating_hyperplane.html
if len(sys.argv) > 1:
samples = int( sys.argv[1] )
c_std=2.0
else:
samples = 10
c_std=1.0
X, y = make_blobs(n_samples=samples, cluster_std=c_std, centers=2)
# make a plotting grid
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# svm
clf = SVC(kernel='linear').fit(X, y)
# predict all points in grid
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# separating plane and margins
w = clf.coef_[0]
a = -w[0] / w[1]
xxx = np.linspace(x_min, x_max)
yyy = a * xxx - (clf.intercept_[0]) / w[1]
# calculate the large margin boundaries defined by the support vectors
b = clf.support_vectors_[0]
yyy_down = a * xxx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yyy_up = a * xxx + (b[1] - a * b[0])
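# Derivation for the lines above: the decision boundary of the linear SVM is
# w[0]*x + w[1]*y + b = 0, i.e. y = -(w[0]/w[1])*x - b/w[1], which is what
# ``a`` and ``yyy`` encode (with b = clf.intercept_[0]). The margin lines are
# the parallel lines with the same slope ``a`` passing through a support
# vector on each side, hence y = a*x + (sv[1] - a*sv[0]) for a support vector ``sv``.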
# plot margins
plt.figure(figsize=(8,6))
plt.plot(xxx, yyy, 'k-', linewidth=1)
plt.plot(xxx, yyy_down, 'k--', linewidth=1)
plt.plot(xxx, yyy_up, 'k--', linewidth=1)
# plot decision contours
Z = Z.reshape(xx.shape)
#plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.contourf(xx, yy, Z, alpha=0.25)
# plot data
plt.scatter(X[:, 0], X[:, 1],
s=100,
c=y,
alpha=0.8,
cmap=plt.cm.Paired
)
# plot support vectors
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=300,
facecolors='none'
)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel('x')
plt.ylabel('y')
# SHOW ALL THE THINGS
plt.show()
| unlicense |
marcocaccin/scikit-learn | sklearn/ensemble/voting_classifier.py | 6 | 8038 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilties calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
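# Hedged usage sketch: because get_params() above exposes nested parameters as
# '<estimator name>__<param>', the ensemble can be tuned with a grid search
# (clf1, clf2, X, y as in the class docstring example; the GridSearchCV import
# path may differ between scikit-learn versions):
#     from sklearn.grid_search import GridSearchCV
#     eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)], voting='soft')
#     params = {'lr__C': [1.0, 100.0], 'rf__n_estimators': [20, 200]}
#     grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5).fit(X, y)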
| bsd-3-clause |
MaxParsons/amo-physics | liexperiment/raman/rateequation_twolevel_1d_harmonic.py | 1 | 10739 | '''
Created on Dec 5, 2014
@author: Max
'''
import numpy as np
import matplotlib.pyplot as plt
from amo.core import odesolver
from amo.core import physicalconstants
from liexperiment.traps import harmonicoscillator3d
c = physicalconstants.PhysicalConstantsSI
Er = c.h * 75.0e3
class RamanSimulation(object):
def __init__(self):
# SI units everywhere
# experiment parameters
self.f_trap = 0.65e6 # trap frequency
self.detuning = -self.f_trap
self.cutoff_state = 8.0 # first state to tunnel, indexed from 0
self.pi_time = 10.0e-6 # raman carrier pi-time
self.pump_time = 10.0e-6 # pumping time
self.photons_per_raman = 2.0 # recoil heating for fudge factor
self.duration = 0.5e-3 # how long is the raman cooling
self.numsteps = 500 # how many points for the simulation
# constants
self.f_recoil = 75.0e3
self.f_anharmonicity = 26.0e3
# calculated from experiment parameters
@property
def cutoff_state(self):
return self._cutoff_state
@cutoff_state.setter
def cutoff_state(self, value):
self._cutoff_state = value
self.init_populations = np.zeros((self._cutoff_state + 1,))
self.transition_matrix = np.zeros((self._cutoff_state + 1, self._cutoff_state + 1))
@property
def eta_0(self):
return np.sqrt(self.f_recoil / self.f_trap)
@property
def eta(self):
return 0.19 * np.sqrt(1.0e6 / self.f_trap)
@property
def eps(self):
return self.photons_per_raman * self.eta_0 ** 2
def lineshape(self, detuning, raman_rabi, decay_rate):
s0 = 2.0 * raman_rabi ** 2 / decay_rate ** 2
return decay_rate / 2.0 * s0 / (1 + s0 + (2.0 * detuning / decay_rate) ** 2)
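    # This is the standard saturated Lorentzian scattering rate,
    #     R(delta) = (Gamma/2) * s0 / (1 + s0 + (2*delta/Gamma)**2),
    # with saturation parameter s0 = 2*Omega**2 / Gamma**2
    # (Omega = raman_rabi, Gamma = decay_rate, delta = detuning).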
def sideband_frequencies(self, n0, nf):
if n0 < nf:
return self.f_trap * (nf - n0) - np.sign(nf - n0) * self.f_anharmonicity * np.sum(np.arange(n0, nf))
else:
return self.f_trap * (nf - n0) - np.sign(nf - n0) * self.f_anharmonicity * np.sum(np.arange(nf, n0))
def transition_rate_neighbor_only(self, n0, nrate):
decay_rate = 2.0 * np.pi / self.pump_time
if nrate == self._cutoff_state:
if (nrate - n0) == 1: # heating from state below
up_rabi = np.sqrt(n0 + 1) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_heat = self.sideband_frequencies(n0, n0 + 1)
return (1 + self.eps) * (self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) + self.eta ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
elif nrate == n0:
return 0.0
else:
return 0.0
elif nrate == self._cutoff_state - 1:
if (nrate - n0) == 1:
up_rabi = np.sqrt(n0 + 1) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_heat = self.sideband_frequencies(n0 , n0 + 1)
return (1 + self.eps) * (self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) + self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
elif (nrate - n0) == -1:
return 0.0
elif nrate == n0:
down_rabi = np.sqrt(n0) * self.eta * np.pi / self.pi_time
up_rabi = np.sqrt(n0 + 1) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_heat = self.sideband_frequencies(n0, n0 + 1)
f_cool = self.sideband_frequencies(n0, n0 - 1)
return (1 + self.eps) * (-self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) - self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate)) + \
(1 - self.eps) * (-self.lineshape(self.detuning - f_cool, down_rabi, decay_rate) - self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
else:
return 0.0
else:
if (nrate - n0) == 1:
up_rabi = np.sqrt(n0 + 1) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_heat = self.sideband_frequencies(n0 , n0 + 1)
return (1 + self.eps) * (self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) + self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
elif (nrate - n0) == -1:
down_rabi = np.sqrt(n0) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_cool = self.sideband_frequencies(n0, n0 - 1)
return (1 - self.eps) * (self.lineshape(self.detuning - f_cool, down_rabi, decay_rate) + self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
elif nrate == n0:
down_rabi = np.sqrt(n0) * self.eta * np.pi / self.pi_time
up_rabi = np.sqrt(n0 + 1) * self.eta * np.pi / self.pi_time
stay_rabi = np.pi / self.pi_time
f_heat = self.sideband_frequencies(n0, n0 + 1)
f_cool = self.sideband_frequencies(n0, n0 - 1)
if n0 == 0:
return (1 + self.eps) * (-self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) - self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
else:
return (1 + self.eps) * (-self.lineshape(self.detuning - f_heat, up_rabi, decay_rate) - self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate)) + \
(1 - self.eps) * (-self.lineshape(self.detuning - f_cool, down_rabi, decay_rate) - self.eta_0 ** 2 * self.lineshape(self.detuning, stay_rabi, decay_rate))
else:
return 0.0
def set_transition_matrix(self):
for n0 in np.arange(0, len(self.init_populations)):
for nrate in np.arange(0, len(self.init_populations)):
self.transition_matrix[nrate, n0] = self.transition_rate_neighbor_only(n0, nrate)
def set_mean_phonon_number(self):
numbers = np.arange(0, self._cutoff_state + 1)
self.mean_phonon_number = np.dot(self.populations, numbers)
def simulate_cooling(self, isPlot=False):
self.set_transition_matrix()
# print np.sum(self.transition_matrix, 0)
r = odesolver.rateequation(self.transition_matrix, self.init_populations)
r.solve(self.duration, numsteps=self.numsteps)
self.populations = r.result.populations
self.times = r.result.t
self.set_mean_phonon_number()
if isPlot:
r.result.plot()
def set_default_simulation_parameters(sim):
sim.f_trap = 0.65e6 # trap frequency
sim.f_recoil = 75e3
sim.f_anharmonicity = 3.5e3
sim.cutoff_state = 15.0 # first state to tunnel, indexed from 0
trap_frequencies = np.array([sim.f_trap, 1000.0e3, 1300.0e3])
cutoffs = np.array([sim._cutoff_state, 8, 11])
sim.detuning = -sim.f_trap
sim.pi_time = 2.0e-6 # raman carrier pi-time
sim.pump_time = 10.0e-6 # pumping time
sim.photons_per_raman = 2.0 # recoil heating for fudge factor
sim.duration = 0.2e-3 # how long is the raman cooling
sim.numsteps = 100 # how many points for the simulation
if __name__ == "__main__":
figmean, axmean = plt.subplots(1, 1)
figgnd, axgnd = plt.subplots(1, 1)
figlost, axlost = plt.subplots(1, 1)
def detuningscan():
sim = RamanSimulation()
average_energy = 3.0 * Er
detunings = np.linspace(-1.5*sim.f_trap, 0.5*sim.f_trap, 20)
print detunings.shape
sample_idx = sim.numsteps - 1
for delta in detunings:
sim.detuning = delta
lat = harmonicoscillator3d.harmonicoscillator3d(trap_frequencies, cutoffs)
temperature = lat.temperature(average_energy)
sim.init_populations[0:-1] = lat.populations_sum_over_all_but_first_frequency(temperature)
sim.simulate_cooling(isPlot=False)
axmean.plot(delta/1.0e3, sim.mean_phonon_number[sample_idx], marker = "o", linestyle = "None", color = 'b')
print sim.mean_phonon_number[sample_idx]
axmean.set_title("Mean phonon number vs. Detuning")
axmean.set_ylabel("Mean phonon number")
axmean.set_xlabel("Detuning from carrier (kHz)")
axgnd.plot(delta/1.0e3, sim.populations[sample_idx, 0], marker = "o", linestyle = "None", color = 'b')
axgnd.set_title("Ground state fraction vs. Detuning")
axgnd.set_ylabel("Ground state fraction")
axgnd.set_xlabel("Detuning from carrier (kHz)")
axlost.plot(delta/1.0e3, sim.populations[sample_idx, sim._cutoff_state], marker = "o", linestyle = "None", color = 'b')
axlost.set_title("Fraction lost vs. Detuning")
axlost.set_ylabel("Fraction lost")
axlost.set_xlabel("Detuning from carrier (kHz)")
def durationscan():
sim = RamanSimulation()
average_energy = 3.0 * Er
durations = np.linspace(0.05*sim.pi_time, 50*sim.pi_time, 20)
sample_idx = sim.numsteps - 1
for duration in durations:
sim.duration = duration
lat = harmonicoscillator3d.harmonicoscillator3d(durations, cutoffs)
temperature = lat.temperature(average_energy)
sim.init_populations[0:-1] = lat.populations_sum_over_all_but_first_frequency(temperature)
sim.simulate_cooling(isPlot=False)
axmean.plot(duration*1.0e6, sim.mean_phonon_number[sample_idx], marker = "o", linestyle = "None", color = 'b')
print sim.mean_phonon_number[sample_idx]
axmean.set_title("duration vs. Detuning")
axmean.set_ylabel("Mean phonon number")
axmean.set_xlabel("Duration (us)")
axgnd.plot(duration*1.0e6, sim.populations[sample_idx, 0], marker = "o", linestyle = "None", color = 'b')
axgnd.set_title("Ground state fraction vs. Detuning")
axgnd.set_ylabel("Ground state fraction")
axgnd.set_xlabel("Duration (us)")
axlost.plot(duration*1.0e6, sim.populations[sample_idx, sim._cutoff_state], marker = "o", linestyle = "None", color = 'b')
axlost.set_title("Fraction lost vs. Duration")
axlost.set_ylabel("Fraction lost")
axlost.set_xlabel("Duration (us)")
durationscan()
plt.show()
| mit |
abhishekkrthakur/tensorLDA | tensorLDA.py | 1 | 11311 | # Implementation of Tensor LDA for object detection in images
# Author: Abhishek Thakur
# Based on paper by C. Bauckhage and J.K. Tsotsos
from scipy.linalg import sqrtm, inv
import numpy as np
import random
from PIL import Image
import glob
import os
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
import matplotlib.cm as cm
from skimage.feature import match_template
from sklearn import preprocessing
from numpy import *
from numpy.random import randint,random_integers
from numpy.core.umath_tests import inner1d
from scipy.optimize import minimize, rosen, rosen_der
import scipy
from scipy.signal import convolve2d, correlate2d
from PIL import Image, ImageDraw
from scipy.ndimage.filters import gaussian_filter
from matplotlib.patches import Rectangle
from numpy.lib.stride_tricks import as_strided as ast
def Gram_Schmidt(vecs, row_wise_storage=True, tol=1E-10):
vecs = asarray(vecs) # transform to array if list of vectors
if row_wise_storage:
A = transpose(vecs).copy()
else:
A = vecs.copy()
m, n = A.shape
V = zeros((m,n))
for j in xrange(n):
v0 = A[:,j]
v = v0.copy()
for i in xrange(j):
vi = V[:,i]
if (abs(vi) > tol).any():
v -= (vdot(v0,vi)/vdot(vi,vi))*vi
V[:,j] = v
return transpose(V) if row_wise_storage else V
def get_trainingdata():
path = '/Users/abhishek/Documents/workspace/tensor_LDA/Train/'
datapath = '/Users/abhishek/Documents/workspace/tensor_LDA/Train/*.pgm'
n = len(glob.glob(datapath))
traindata = np.empty((n,2511))
labels = np.empty(n)
tot_count = 0
count_p = 0
count_n = 0
for infile in glob.glob( os.path.join(path, '*.pgm') ):
lbl_str = infile[57:58]
#print lbl_str
img = Image.open(infile)
img = np.asarray(img)
img = np.hstack(img)
traindata[tot_count] = img
if (lbl_str=='P'):
labels[tot_count] = 1
count_p += 1
else:
labels[tot_count] = -1
count_n += 1
print tot_count
tot_count += 1
for i in range(n):
if (labels[i] == 1):
labels[i] /= count_p
else:
labels[i] /= count_n
traindata.dump('train_data_new.dat')
labels.dump('labels_new.dat')
def get_testdata():
path = '/Users/abhishek/Documents/workspace/tensor_LDA/Test/'
datapath = '/Users/abhishek/Documents/workspace/tensor_LDA/Test/*.PGM'
n = len(glob.glob(datapath))
traindata = np.empty((n,24150))
tot_count = 0
for infile in glob.glob( os.path.join(path, '*.PGM') ):
img = Image.open(infile)
img = np.asarray(img)
img = np.hstack(img)
traindata[tot_count] = img
print tot_count
tot_count += 1
traindata.dump('test_data.dat')
def createLabels(trainDir,n):
labels = np.empty((n,1))
for i in range(n):
print trainDir
def calc_contraction_u(traindata, u):
n = len(traindata)
contraction_data = np.empty((n,81))
for i in range(n):
img = np.reshape(traindata[i], (31,81))
#print img.shape, u.shape
#plt.imshow(img)
#plt.show()
#print img
contraction_data[i] = np.dot(u,img)
#print contraction_data[i].shape
#print contraction_data.shape
return contraction_data
def calc_contraction_v(traindata, v):
n = len(traindata)
contraction_data = np.empty((n,31))
for i in range(n):
img = np.reshape(traindata[i], (31,81))
#print img.shape, u.shape
#print i
temp = np.dot(img,v.T)
contraction_data[i] = temp.T
return contraction_data
def dist(x,y):
return np.sqrt(np.sum((x-y)**2))
def normalize(arr):
arr=arr.astype('float32')
if arr.max() > 1.0:
arr/=255.0
return arr
def qr_mgs( A ):
"""QR decomposition of A.
Modified Gram-Schmidt algorithm, row version (Bjorck alg. 2.1)"""
A = np.array(A, dtype=float)
m,n = A.shape
Q = np.zeros( (m,n) )
R = np.zeros( (n,n) )
for k in range( 0, n ) :
R[k,k] = np.linalg.norm( A[:,k] )
Q[:,k] = A[:,k] / R[k,k]
for j in range( k+1, n ) :
R[k,j] = np.dot( Q[:,k], A[:,j] )
A[:,j] = A[:,j] - Q[:,k] * R[k,j]
return Q,R
def rho_1(traindata, labels, eps,k):
#u_init = random_integers(-10,10,size=(1,31)) # change range to R
u_init =np.random.rand(1,31)
#v_init = random_integers(0,256,size=(1,81)) # change range to R
st_u = u_init
t = 0
dist2 = 0
while True:
t = t + 1
cont_u = calc_contraction_u(traindata,u_init)
v1 = inv(np.dot(cont_u.T,cont_u))
v2 = np.dot(cont_u.T,labels)
v_temp = np.dot(v1,v2)
#v_temp = normalize(u_temp)
vp = v_temp
cont_v = calc_contraction_v(traindata,v_temp)
u1 = inv(np.dot(cont_v.T,cont_v))
u2 = np.dot(cont_v.T,labels)
u_temp = np.dot(u1,u2)
dist1 = np.linalg.norm(u_temp - u_init)
up = u_temp
#t_st_u = np.vstack((st_u,u_temp))
#u_gs = Gram_Schmidt(t_st_u)
#print st_u.shape
#u_temp =u_gs[-1]
u_temp = normalize(u_temp)
#st_u = np.vstack((st_u,u))
up = u_temp
if(abs(dist2 - dist1) < eps):
break
u_init = u_temp
print t, abs(dist2-dist1)
dist2 = dist1
#print u_temp.shape, v_temp.shape
u_temp = up
v_temp = vp
x = rho_R(u_temp,v_temp,traindata,labels,eps,k)
return x
def rho_R(u,v,traindata,labels,eps,k):
p=u
q=v
st_u = p
st_v = q
u_init = u
print st_u
print st_v
for i in range(k-1):
t = 0
# change range to R
#u_init = random_integers(-10,10,size=(1,31))
u_init =np.random.rand(1,31)
u_init = Gram_Schmidt(np.vstack((st_u,u_init)))
u_init =u_init[-1]
dist2 = 0
while True:
t = t + 1
cont_u = calc_contraction_u(traindata,u_init)
v1 = inv(np.dot(cont_u.T,cont_u))
v2 = np.dot(cont_u.T,labels)
v_temp = np.dot(v1,v2)
t_st_v = np.vstack((st_v,v_temp))
v_gs = Gram_Schmidt(t_st_v)
v_temp = v_gs[-1]
vp = v_temp
#st_v =np.vstack((st_v,v))
#print vv.shape
cont_v = calc_contraction_v(traindata,v_temp)
u1 = inv(np.dot(cont_v.T,cont_v))
u2 = np.dot(cont_v.T,labels)
u_temp = np.dot(u1,u2)
t_st_u = np.vstack((st_u,u_temp))
u_gs = Gram_Schmidt(t_st_u)
#print st_u.shape
u_temp =u_gs[-1]
u_temp = normalize(u_temp)
#st_u = np.vstack((st_u,u))
up = u_temp
dist1 = np.linalg.norm(u_temp - st_u[-1])
#dist1 = dist(u_temp , st_u[-1])
if(abs(dist2 - dist1) < eps):
break
u_init = u_temp
print t, abs(dist2 - dist1)
dist2 = dist1
#print t
#st_u = u_gs
#st_v = v_gs
u_temp = up
v_temp = vp
st_u = np.vstack((st_u,u_temp))
st_v = np.vstack((st_v,v_temp))
print st_u.shape
#print t
#print u.shape,v.shape
#plt.imshow(np.outer(uu, vv.T))
#plt.show()
#print i
#p = u_gs
#q = v_gs
xxx= 0
st_u.dump('u.dat')
st_v.dump('v.dat')
for i in range(st_u.shape[0]):
xxx += np.outer(st_u[i], st_v[i].T)
#xxx[xxx < 0] = 0
#
x = normalize((xxx))
#x = xxx
#x = 255.0 * xxx/xxx.max()
##plt.imshow(x,cmap = plt.get_cmap('gray'))#, cmap = cm.Grays_r)
##plt.show()
scipy.misc.imsave('outfile.jpg', x)
#print x
#return x
XM(x,traindata,labels,3.0)
return x
def XM(M,traindata,labels,threshold):
n = len(traindata)
#for i in range(n):
c_p = 0
c_n = 0
n1 = len(labels)
pp1 = np.empty((n1,1))
#mean = M.mean(axis=0)
#M = M - mean[np.newaxis,:]
for i in range(n):
#print labels
img = np.reshape(traindata[i], (31,81))
pp = img*M#np.dot(M,img.T)#,'valid')
px = (np.mean(pp) - np.var(pp)) * 1000000.0
pp1[i] = px
print px
if(px>threshold):
c_p += 1
else:
            c_n += 1
pp1.dump('hist_data.dat')
print c_p , c_n
def predict(testdata, M, threshold):
n = len(testdata)
predictions = np.empty((n,1))
for i in range(n):
img = np.reshape(testdata[i], (31,81))
pp = img*M#np.dot(M,img.T)#,'valid')
px = (np.mean(pp) - np.var(pp)) * 1000000.0
if (px < threshold):
predictions[i] = 0
else:
predictions[i] = 1
return predictions
def getW(traindata, labels, eps, k):
W = rho_1(traindata,labels,eps,k)
return W
# M = patch
# filename = test image filename(full path)
def slidingWindow(img, M,threshold):
img = Image.open(img)
img = np.asarray(img)
img2 = np.pad(img,(90,90),'constant', constant_values=(0,0))
val = np.empty((img.shape[0],img.shape[1]))
for j in range(val.shape[0]):
for i in range(val.shape[1]):
temp = img2[j+90:j+121,i+90:i+171]
val[j,i] = testOnImage(M,temp,threshold)
#print val[j,i]
val = scipy.ndimage.binary_erosion(val).astype(val.dtype)
val = np.where(val == val.max())
#val = np.asarray(val)
a = val[0]
b= val[1]
#ij = np.unravel_index(np.argmax(val), val.shape)
#x, y = ij[::-1]
# #fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
#
#
plt.imshow(img,cmap ="Greys_r")
ct = plt.gca()
ct.set_axis_off()
ct.set_title('image')
# highlight matched region
hcoin = 35
wcoin = 81
n = len(a)
for i in range(n):
rect = plt.Rectangle((b[i], a[i]), wcoin, hcoin, edgecolor='r', facecolor='none')
ct.add_patch(rect)
#
# # highlight matched region
plt.autoscale(False)
# #plt.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
#
plt.show()
#plt.imshow(val)
#plt.show()
#plt.imshow(img)
#plt.show()
return val
def testOnImage(M,img,thresh):
pp = img*M#np.dot(M,img.T)#,'valid')
px = (np.mean(pp) - np.var(pp)) * 1000000.0
if (px > thresh):
return 1
else:
return 0
if __name__ == '__main__':
traindata = np.load('train_data_new.dat')
labels = np.load('labels_new.dat')
#u_init = random.sample(range(256), 81)
#get_trainingdata()
# print "Normalizing..."
# mean = traindata.mean(axis=0)
# traindata = traindata - mean[np.newaxis,:]
M = rho_1(traindata,labels,0.0001,9)
#slidingWindow('/Users/abhishek/Documents/workspace/tensor_LDA/Test/TEST_40.PGM',M)
#print u,v
#calc_contraction_u(train_)
| mit |
hainm/statsmodels | statsmodels/graphics/dotplots.py | 31 | 18190 | import numpy as np
from statsmodels.compat import range
from . import utils
def dot_plot(points, intervals=None, lines=None, sections=None,
styles=None, marker_props=None, line_props=None,
split_names=None, section_order=None, line_order=None,
stacked=False, styles_order=None, striped=False,
horizontal=True, show_names="both",
fmt_left_name=None, fmt_right_name=None,
show_section_titles=None, ax=None):
"""
Produce a dotplot similar in style to those in Cleveland's
"Visualizing Data" book. These are also known as "forest plots".
Parameters
----------
points : array_like
The quantitative values to be plotted as markers.
intervals : array_like
The intervals to be plotted around the points. The elements
of `intervals` are either scalars or sequences of length 2. A
scalar indicates the half width of a symmetric interval. A
sequence of length 2 contains the left and right half-widths
(respectively) of a nonsymmetric interval. If None, no
intervals are drawn.
lines : array_like
A grouping variable indicating which points/intervals are
drawn on a common line. If None, each point/interval appears
on its own line.
sections : array_like
A grouping variable indicating which lines are grouped into
sections. If None, everything is drawn in a single section.
styles : array_like
A grouping label defining the plotting style of the markers
and intervals.
marker_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting markers. Useful keyword
arguments are "color", "marker", and "ms" (marker size).
line_props : dict
A dictionary mapping style codes (the values in `styles`) to
dictionaries defining key/value pairs to be passed as keyword
arguments to `plot` when plotting interval lines. Useful
keyword arguments are "color", "linestyle", "solid_capstyle",
and "linewidth".
split_names : string
If not None, this is used to split the values of `lines` into
substrings that are drawn in the left and right margins,
respectively. If None, the values of `lines` are drawn in the
left margin.
section_order : array_like
The section labels in the order in which they appear in the
dotplot.
line_order : array_like
The line labels in the order in which they appear in the
dotplot.
stacked : boolean
If True, when multiple points or intervals are drawn on the
same line, they are offset from each other.
styles_order : array_like
If stacked=True, this is the order in which the point styles
on a given line are drawn from top to bottom (if horizontal
        is True) or from left to right (if horizontal is False). If
None (default), the order is lexical.
striped : boolean
If True, every other line is enclosed in a shaded box.
horizontal : boolean
If True (default), the lines are drawn horizontally, otherwise
they are drawn vertically.
show_names : string
Determines whether labels (names) are shown in the left and/or
        right margins (top/bottom margins if `horizontal` is False).
If `both`, labels are drawn in both margins, if 'left', labels
are drawn in the left or top margin. If `right`, labels are
drawn in the right or bottom margin.
fmt_left_name : function
The left/top margin names are passed through this function
before drawing on the plot.
fmt_right_name : function
        The right/bottom margin names are passed through this function
before drawing on the plot.
show_section_titles : bool or None
If None, section titles are drawn only if there is more than
one section. If False/True, section titles are never/always
drawn, respectively.
ax : matplotlib.axes
The axes on which the dotplot is drawn. If None, a new axes
is created.
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Notes
-----
`points`, `intervals`, `lines`, `sections`, `styles` must all have
the same length whenever present.
Examples
--------
This is a simple dotplot with one point per line:
>>> dot_plot(points=point_values)
This dotplot has labels on the lines (if elements in
`label_values` are repeated, the corresponding points appear on
the same line):
>>> dot_plot(points=point_values, lines=label_values)
References
----------
* Cleveland, William S. (1993). "Visualizing Data". Hobart
Press.
* Jacoby, William G. (2006) "The Dot Plot: A Graphical Display
for Labeled Quantitative Values." The Political Methodologist
14(1): 6-14.
"""
import matplotlib.transforms as transforms
fig, ax = utils.create_mpl_ax(ax)
# Convert to numpy arrays if that is not what we are given.
points = np.asarray(points)
asarray_or_none = lambda x : None if x is None else np.asarray(x)
intervals = asarray_or_none(intervals)
lines = asarray_or_none(lines)
sections = asarray_or_none(sections)
styles = asarray_or_none(styles)
# Total number of points
npoint = len(points)
# Set default line values if needed
if lines is None:
lines = np.arange(npoint)
# Set default section values if needed
if sections is None:
sections = np.zeros(npoint)
# Set default style values if needed
if styles is None:
styles = np.zeros(npoint)
# The vertical space (in inches) for a section title
section_title_space = 0.5
# The number of sections
nsect = len(set(sections))
if section_order is not None:
nsect = len(set(section_order))
# The number of section titles
if show_section_titles == False:
draw_section_titles = False
nsect_title = 0
elif show_section_titles == True:
draw_section_titles = True
nsect_title = nsect
else:
draw_section_titles = nsect > 1
nsect_title = nsect if nsect > 1 else 0
# The total vertical space devoted to section titles.
section_space_total = section_title_space * nsect_title
# Add a bit of room so that points that fall at the axis limits
# are not cut in half.
ax.set_xmargin(0.02)
ax.set_ymargin(0.02)
if section_order is None:
lines0 = list(set(sections))
lines0.sort()
else:
lines0 = section_order
if line_order is None:
lines1 = list(set(lines))
lines1.sort()
else:
lines1 = line_order
# A map from (section,line) codes to index positions.
lines_map = {}
for i in range(npoint):
if section_order is not None and sections[i] not in section_order:
continue
if line_order is not None and lines[i] not in line_order:
continue
ky = (sections[i], lines[i])
if ky not in lines_map:
lines_map[ky] = []
lines_map[ky].append(i)
# Get the size of the axes on the parent figure in inches
bbox = ax.get_window_extent().transformed(
fig.dpi_scale_trans.inverted())
awidth, aheight = bbox.width, bbox.height
# The number of lines in the plot.
nrows = len(lines_map)
# The positions of the lowest and highest guideline in axes
# coordinates (for horizontal dotplots), or the leftmost and
# rightmost guidelines (for vertical dotplots).
bottom, top = 0, 1
if horizontal:
# x coordinate is data, y coordinate is axes
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
else:
# x coordinate is axes, y coordinate is data
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
# Space used for a section title, in axes coordinates
title_space_axes = section_title_space / aheight
# Space between lines
if horizontal:
dpos = (top - bottom - nsect_title*title_space_axes) /\
float(nrows)
else:
dpos = (top - bottom) / float(nrows)
# Determine the spacing for stacked points
if styles_order is not None:
style_codes = styles_order
else:
style_codes = list(set(styles))
style_codes.sort()
# Order is top to bottom for horizontal plots, so need to
# flip.
if horizontal:
style_codes = style_codes[::-1]
# nval is the maximum number of points on one line.
nval = len(style_codes)
if nval > 1:
stackd = dpos / (2.5*(float(nval)-1))
else:
stackd = 0.
# Map from style code to its integer position
#style_codes_map = {x: style_codes.index(x) for x in style_codes}
# python 2.6 compat version:
style_codes_map = dict((x, style_codes.index(x)) for x in style_codes)
# Setup default marker styles
colors = ["r", "g", "b", "y", "k", "purple", "orange"]
if marker_props is None:
#marker_props = {x: {} for x in style_codes}
# python 2.6 compat version:
marker_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in marker_props[sc]:
marker_props[sc]["color"] = colors[j % len(colors)]
if "marker" not in marker_props[sc]:
marker_props[sc]["marker"] = "o"
if "ms" not in marker_props[sc]:
marker_props[sc]["ms"] = 10 if stackd == 0 else 6
# Setup default line styles
if line_props is None:
#line_props = {x: {} for x in style_codes}
# python 2.6 compat version:
line_props = dict((x, {}) for x in style_codes)
for j in range(nval):
sc = style_codes[j]
if "color" not in line_props[sc]:
line_props[sc]["color"] = "grey"
if "linewidth" not in line_props[sc]:
line_props[sc]["linewidth"] = 2 if stackd > 0 else 8
if horizontal:
# The vertical position of the first line.
pos = top - dpos/2 if nsect == 1 else top
else:
# The horizontal position of the first line.
pos = bottom + dpos/2
# Points that have already been labeled
labeled = set()
# Positions of the y axis grid lines
ticks = []
# Loop through the sections
for k0 in lines0:
# Draw a section title
if draw_section_titles:
if horizontal:
y0 = pos + dpos/2 if k0 == lines0[0] else pos
ax.fill_between((0, 1), (y0,y0),
(pos-0.7*title_space_axes,
pos-0.7*title_space_axes),
color='darkgrey',
transform=ax.transAxes,
zorder=1)
txt = ax.text(0.5, pos - 0.35*title_space_axes, k0,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
txt.set_fontweight("bold")
pos -= title_space_axes
else:
m = len([k for k in lines_map if k[0] == k0])
ax.fill_between((pos-dpos/2+0.01,
pos+(m-1)*dpos+dpos/2-0.01),
(1.01,1.01), (1.06,1.06),
color='darkgrey',
transform=ax.transAxes,
zorder=1, clip_on=False)
txt = ax.text(pos + (m-1)*dpos/2, 1.02, k0,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
txt.set_fontweight("bold")
jrow = 0
for k1 in lines1:
# No data to plot
if (k0, k1) not in lines_map:
continue
# Draw the guideline
if horizontal:
ax.axhline(pos, color='grey')
else:
ax.axvline(pos, color='grey')
# Set up the labels
if split_names is not None:
us = k1.split(split_names)
if len(us) >= 2:
left_label, right_label = us[0], us[1]
else:
left_label, right_label = k1, None
else:
left_label, right_label = k1, None
if fmt_left_name is not None:
left_label = fmt_left_name(left_label)
if fmt_right_name is not None:
right_label = fmt_right_name(right_label)
# Draw the stripe
if striped and jrow % 2 == 0:
if horizontal:
ax.fill_between((0, 1), (pos-dpos/2, pos-dpos/2),
(pos+dpos/2, pos+dpos/2),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
else:
ax.fill_between((pos-dpos/2, pos+dpos/2),
(0, 0), (1, 1),
color='lightgrey',
transform=ax.transAxes,
zorder=0)
jrow += 1
# Draw the left margin label
if show_names.lower() in ("left", "both"):
if horizontal:
ax.text(-0.1/awidth, pos, left_label,
horizontalalignment="right",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, -0.1/aheight, left_label,
horizontalalignment="center",
verticalalignment='top',
transform=ax.transAxes,
family='monospace')
# Draw the right margin label
if show_names.lower() in ("right", "both"):
if right_label is not None:
if horizontal:
ax.text(1 + 0.1/awidth, pos, right_label,
horizontalalignment="left",
verticalalignment='center',
transform=ax.transAxes,
family='monospace')
else:
ax.text(pos, 1 + 0.1/aheight, right_label,
horizontalalignment="center",
verticalalignment='bottom',
transform=ax.transAxes,
family='monospace')
# Save the vertical position so that we can place the
# tick marks
ticks.append(pos)
# Loop over the points in one line
for ji,jp in enumerate(lines_map[(k0,k1)]):
# Calculate the vertical offset
yo = 0
if stacked:
yo = -dpos/5 + style_codes_map[styles[jp]]*stackd
pt = points[jp]
# Plot the interval
if intervals is not None:
# Symmetric interval
if np.isscalar(intervals[jp]):
lcb, ucb = pt - intervals[jp],\
pt + intervals[jp]
# Nonsymmetric interval
else:
lcb, ucb = pt - intervals[jp][0],\
pt + intervals[jp][1]
# Draw the interval
if horizontal:
ax.plot([lcb, ucb], [pos+yo, pos+yo], '-',
transform=trans,
**line_props[styles[jp]])
else:
ax.plot([pos+yo, pos+yo], [lcb, ucb], '-',
transform=trans,
**line_props[styles[jp]])
# Plot the point
sl = styles[jp]
sll = sl if sl not in labeled else None
labeled.add(sl)
if horizontal:
ax.plot([pt,], [pos+yo,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
else:
ax.plot([pos+yo,], [pt,], ls='None',
transform=trans, label=sll,
**marker_props[sl])
if horizontal:
pos -= dpos
else:
pos += dpos
# Set up the axis
if horizontal:
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.set_ticks_position("none")
ax.set_yticklabels([])
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_position(('axes', -0.1/aheight))
ax.set_ylim(0, 1)
ax.yaxis.set_ticks(ticks)
ax.autoscale_view(scaley=False, tight=True)
else:
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("none")
ax.set_xticklabels([])
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('axes', -0.1/awidth))
ax.set_xlim(0, 1)
ax.xaxis.set_ticks(ticks)
ax.autoscale_view(scalex=False, tight=True)
return fig
| bsd-3-clause |
huongttlan/bokeh | bokeh/compat/mpl_helpers.py | 20 | 5287 | "Helper functions for the mpl module."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from itertools import cycle, islice
from scipy import interpolate, signal
from ..models import GlyphRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def convert_color(mplcolor):
"Converts mpl color formats to Bokeh color formats."
charmap = dict(b="blue", g="green", r="red", c="cyan", m="magenta",
y="yellow", k="black", w="white")
if mplcolor in charmap:
return charmap[mplcolor]
try:
colorfloat = float(mplcolor)
if 0 <= colorfloat <= 1.0:
# This is a grayscale value
return tuple([int(255 * colorfloat)] * 3)
except:
pass
if isinstance(mplcolor, tuple):
# These will be floats in the range 0..1
return int(255 * mplcolor[0]), int(255 * mplcolor[1]), int(255 * mplcolor[2])
return mplcolor
def convert_dashes(dash):
""" Converts a Matplotlib dash specification
bokeh.properties.DashPattern supports the matplotlib named dash styles,
but not the little shorthand characters. This function takes care of
mapping those.
"""
mpl_dash_map = {
"-": "solid",
"--": "dashed",
":": "dotted",
"-.": "dashdot",
}
# If the value doesn't exist in the map, then just return the value back.
return mpl_dash_map.get(dash, dash)
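# Illustrative behaviour of the two converters above (added note, not part of
# the original module):
#
#     convert_color('r')               # -> 'red'
#     convert_color((0.5, 0.5, 0.5))   # -> (127, 127, 127)
#     convert_dashes('--')             # -> 'dashed'
#     convert_dashes('dotted')         # -> 'dotted' (already a named style)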
def get_props_cycled(col, prop, fx=lambda x: x):
""" We need to cycle the `get.property` list (where property can be colors,
line_width, etc) as matplotlib does. We use itertools tools for do this
cycling ans slice manipulation.
Parameters:
col: matplotlib collection object
prop: property we want to get from matplotlib collection
fx: funtion (optional) to transform the elements from list obtained
after the property call. Deafults to identity function.
"""
n = len(col.get_paths())
t_prop = [fx(x) for x in prop]
sliced = islice(cycle(t_prop), None, n)
return list(sliced)
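# For example (added illustration, not part of the module): with a collection
# containing 5 paths and prop = ['red', 'blue'], the cycled result is
# ['red', 'blue', 'red', 'blue', 'red'].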
def is_ax_end(r):
"Check if the 'name' (if it exists) in the Glyph's datasource is 'ax_end'"
if isinstance(r, GlyphRenderer):
try:
if r.data_source.data["name"] == "ax_end":
return True
except KeyError:
return False
else:
return False
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.001, f3=5):
"""
Mimic a hand-drawn line from (x, y) data
Source: http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
Parameters
----------
x, y : array_like
arrays to be modified
xlim, ylim : data range
the assumed plot range for the modification. If not specified,
they will be guessed from the data
mag : float
magnitude of distortions
f1, f2, f3 : int, float, int
filtering parameters. f1 gives the size of the window, f2 gives
the high-frequency cutoff, f3 gives the size of the filter
Returns
-------
x, y : ndarrays
The modified lines
"""
x = np.asarray(x)
y = np.asarray(y)
# get limits for rescaling
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
# scale the data
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
# compute the total distance along the path
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
# number of interpolated points is proportional to the distance
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
# interpolate curve at sampled points
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
# we'll perturb perpendicular to the drawn line
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
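# Hypothetical usage sketch (added, not part of the original module):
#
#     t = np.linspace(0, 10, 100)
#     x_xkcd, y_xkcd = xkcd_line(t, np.sin(t), mag=2.0)
#
# The perturbed arrays can then be drawn as an ordinary line to get the
# hand-drawn look.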
| bsd-3-clause |
nkmk/python-snippets | notebook/pandas_describe.py | 1 | 7170 | import pandas as pd
df = pd.DataFrame({'a': [1, 2, 1, 3],
'b': [0.4, 1.1, 0.1, 0.8],
'c': ['X', 'Y', 'X', 'Z'],
'd': ['3', '5', '2', '1'],
'e': [True, True, False, True]})
print(df)
# a b c d e
# 0 1 0.4 X 3 True
# 1 2 1.1 Y 5 True
# 2 1 0.1 X 2 False
# 3 3 0.8 Z 1 True
print(df.dtypes)
# a int64
# b float64
# c object
# d object
# e bool
# dtype: object
print(df.describe())
# a b
# count 4.000000 4.000000
# mean 1.750000 0.600000
# std 0.957427 0.439697
# min 1.000000 0.100000
# 25% 1.000000 0.325000
# 50% 1.500000 0.600000
# 75% 2.250000 0.875000
# max 3.000000 1.100000
print(type(df.describe()))
# <class 'pandas.core.frame.DataFrame'>
print(df.describe().loc['std'])
# a 0.957427
# b 0.439697
# Name: std, dtype: float64
print(df.describe().at['std', 'b'])
# 0.439696865275764
print(df.describe(exclude='number'))
# c d e
# count 4 4 4
# unique 3 4 2
# top X 3 True
# freq 2 1 3
df_notnum = df[['c', 'd', 'e']]
print(df_notnum)
# c d e
# 0 X 3 True
# 1 Y 5 True
# 2 X 2 False
# 3 Z 1 True
print(df_notnum.dtypes)
# c object
# d object
# e bool
# dtype: object
print(df_notnum.describe())
# c d e
# count 4 4 4
# unique 3 4 2
# top X 3 True
# freq 2 1 3
print(df.describe(include='all'))
# a b c d e
# count 4.000000 4.000000 4 4 4
# unique NaN NaN 3 4 2
# top NaN NaN X 3 True
# freq NaN NaN 2 1 3
# mean 1.750000 0.600000 NaN NaN NaN
# std 0.957427 0.439697 NaN NaN NaN
# min 1.000000 0.100000 NaN NaN NaN
# 25% 1.000000 0.325000 NaN NaN NaN
# 50% 1.500000 0.600000 NaN NaN NaN
# 75% 2.250000 0.875000 NaN NaN NaN
# max 3.000000 1.100000 NaN NaN NaN
print(df.describe(include=int))
# a
# count 4.000000
# mean 1.750000
# std 0.957427
# min 1.000000
# 25% 1.000000
# 50% 1.500000
# 75% 2.250000
# max 3.000000
print(type(df.describe(include=int)))
# <class 'pandas.core.frame.DataFrame'>
print(df.describe(include=[object, bool]))
# c d e
# count 4 4 4
# unique 3 4 2
# top X 3 True
# freq 2 1 3
print(df.describe(exclude=[float, object]))
# a e
# count 4.000000 4
# unique NaN 2
# top NaN True
# freq NaN 3
# mean 1.750000 NaN
# std 0.957427 NaN
# min 1.000000 NaN
# 25% 1.000000 NaN
# 50% 1.500000 NaN
# 75% 2.250000 NaN
# max 3.000000 NaN
print(df.count())
# a 4
# b 4
# c 4
# d 4
# e 4
# dtype: int64
print(df.nunique())
# a 3
# b 4
# c 3
# d 4
# e 2
# dtype: int64
print(df.mode())
# a b c d e
# 0 1.0 0.1 X 1 True
# 1 NaN 0.4 NaN 2 NaN
# 2 NaN 0.8 NaN 3 NaN
# 3 NaN 1.1 NaN 5 NaN
print(df.mode().count())
# a 1
# b 4
# c 1
# d 4
# e 1
# dtype: int64
print(df.mode().iloc[0])
# a 1
# b 0.1
# c X
# d 1
# e True
# Name: 0, dtype: object
print(df['c'].value_counts().iat[0])
# 2
print(df.apply(lambda x: x.value_counts().iat[0]))
# a 2
# b 1
# c 2
# d 1
# e 3
# dtype: int64
print(df.mean(numeric_only=True))
# a 1.75
# b 0.60
# e 0.75
# dtype: float64
print(df.std(numeric_only=True))
# a 0.957427
# b 0.439697
# e 0.500000
# dtype: float64
print(df.min(numeric_only=True))
# a 1.0
# b 0.1
# e 0.0
# dtype: float64
print(df.max(numeric_only=True))
# a 3.0
# b 1.1
# e 1.0
# dtype: float64
print(df.median(numeric_only=True))
# a 1.5
# b 0.6
# e 1.0
# dtype: float64
print(df.quantile(q=[0.25, 0.75], numeric_only=True))
# a b e
# 0.25 1.00 0.325 0.75
# 0.75 2.25 0.875 1.00
print(df.quantile(q=[0, 0.25, 0.5, 0.75, 1], numeric_only=True))
# a b e
# 0.00 1.00 0.100 0.00
# 0.25 1.00 0.325 0.75
# 0.50 1.50 0.600 1.00
# 0.75 2.25 0.875 1.00
# 1.00 3.00 1.100 1.00
print(df.describe(percentiles=[0.2, 0.4, 0.6, 0.8]))
# a b
# count 4.000000 4.000000
# mean 1.750000 0.600000
# std 0.957427 0.439697
# min 1.000000 0.100000
# 20% 1.000000 0.280000
# 40% 1.200000 0.480000
# 50% 1.500000 0.600000
# 60% 1.800000 0.720000
# 80% 2.400000 0.920000
# max 3.000000 1.100000
print(df.astype('str').describe())
# a b c d e
# count 4 4 4 4 4
# unique 3 4 3 4 2
# top 1 1.1 X 3 True
# freq 2 1 2 1 3
print(df.astype({'a': str}).describe(exclude='number'))
# a c d e
# count 4 4 4 4
# unique 3 3 4 2
# top 1 X 3 True
# freq 2 2 1 3
print(df.astype({'d': int, 'e': int}).describe())
# a b d e
# count 4.000000 4.000000 4.000000 4.00
# mean 1.750000 0.600000 2.750000 0.75
# std 0.957427 0.439697 1.707825 0.50
# min 1.000000 0.100000 1.000000 0.00
# 25% 1.000000 0.325000 1.750000 0.75
# 50% 1.500000 0.600000 2.500000 1.00
# 75% 2.250000 0.875000 3.500000 1.00
# max 3.000000 1.100000 5.000000 1.00
s_int = df['a']
print(s_int)
# 0 1
# 1 2
# 2 1
# 3 3
# Name: a, dtype: int64
print(s_int.describe())
# count 4.000000
# mean 1.750000
# std 0.957427
# min 1.000000
# 25% 1.000000
# 50% 1.500000
# 75% 2.250000
# max 3.000000
# Name: a, dtype: float64
print(type(s_int.describe()))
# <class 'pandas.core.series.Series'>
s_str = df['d']
print(s_str.describe())
# count 4
# unique 4
# top 3
# freq 1
# Name: d, dtype: object
print(s_str.astype('int').describe())
# count 4.000000
# mean 2.750000
# std 1.707825
# min 1.000000
# 25% 1.750000
# 50% 2.500000
# 75% 3.500000
# max 5.000000
# Name: d, dtype: float64
df['dt'] = pd.to_datetime(['2018-01-01', '2018-03-15', '2018-02-20', '2018-03-15'])
print(df.dtypes)
# a int64
# b float64
# c object
# d object
# e bool
# dt datetime64[ns]
# dtype: object
print(df.describe(include='datetime'))
# dt
# count 4
# unique 3
# top 2018-03-15 00:00:00
# freq 2
# first 2018-01-01 00:00:00
# last 2018-03-15 00:00:00
print(df['dt'].min())
# 2018-01-01 00:00:00
print(df['dt'].max())
# 2018-03-15 00:00:00
print(df.T.describe())
# 0 1 2 3
# count 6 6 6 6
# unique 5 6 6 6
# top 1 2018-03-15 00:00:00 2018-02-20 00:00:00 2018-03-15 00:00:00
# freq 2 1 1 1
| mit |
ishank08/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
xwolf12/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
moonbury/pythonanywhere | github/Numpy/Chapter11/matplotlib_demo.py | 2 | 1481 | import pygame, sys
from pygame.locals import *
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.backends.backend_agg as agg
fig = plt.figure(figsize=[3, 3])
ax = fig.add_subplot(111)
canvas = agg.FigureCanvasAgg(fig)
def plot(data):
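    # Render the figure offscreen with the Agg canvas, grab its RGB byte
    # buffer, and wrap that buffer in a pygame surface so it can be blitted
    # like any other image.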
ax.plot(data)
canvas.draw()
renderer = canvas.get_renderer()
raw_data = renderer.tostring_rgb()
size = canvas.get_width_height()
return pygame.image.fromstring(raw_data, size, "RGB")
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((400, 400))
pygame.display.set_caption('Animating Objects')
img = pygame.image.load('head.jpg')
steps = np.linspace(20, 360, 40).astype(int)
right = np.zeros((2, len(steps)))
down = np.zeros((2, len(steps)))
left = np.zeros((2, len(steps)))
up = np.zeros((2, len(steps)))
right[0] = steps
right[1] = 20
down[0] = 360
down[1] = steps
left[0] = steps[::-1]
left[1] = 360
up[0] = 20
up[1] = steps[::-1]
pos = np.concatenate((right.T, down.T, left.T, up.T))
i = 0
history = np.array([])
surf = plot(history)
while True:
# Erase screen
screen.fill((255, 255, 255))
if i >= len(pos):
i = 0
surf = plot(history)
screen.blit(img, pos[i])
history = np.append(history, pos[i])
screen.blit(surf, (100, 100))
i += 1
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
clock.tick(30)
| gpl-3.0 |
Averroes/statsmodels | examples/python/contrasts.py | 33 | 8722 |
## Contrasts Overview
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
# This document is based heavily on this excellent resource from UCLA http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
# A categorical variable of K categories, or levels, usually enters a regression as a sequence of K-1 dummy variables. This amounts to a linear hypothesis on the level means. That is, each test statistic for these variables amounts to testing whether the mean for that level is statistically significantly different from the mean of the base category. This dummy coding is called Treatment coding in R parlance, and we will follow this convention. There are, however, different coding methods that amount to different sets of linear hypotheses.
#
# In fact, the dummy coding is not technically a contrast coding. This is because the dummy variables add to one and are not functionally independent of the model's intercept. On the other hand, a set of *contrasts* for a categorical variable with `k` levels is a set of `k-1` functionally independent linear combinations of the factor level means that are also independent of the sum of the dummy variables. The dummy coding isn't wrong *per se*. It captures all of the coefficients, but it complicates matters when the model assumes independence of the coefficients such as in ANOVA. Linear regression models do not assume independence of the coefficients and thus dummy coding is often the only coding that is taught in this context.
#
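# A minimal illustration of K-1 dummy (treatment) coding (added sketch; the
# toy factor below is hypothetical and not part of the original example):
from patsy import dmatrix
print(dmatrix("C(g)", {"g": ["a", "b", "c", "b"]}))
# The design matrix has an intercept plus K-1 = 2 columns, C(g)[T.b] and
# C(g)[T.c], each measured against the reference level "a".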
# To have a look at the contrast matrices in Patsy, we will use data from UCLA ATS. First let's load the data.
#### Example Data
import pandas as pd
url = 'http://www.ats.ucla.edu/stat/data/hsb2.csv'
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
# It will be instructive to look at the mean of the dependent variable, write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African American and 4 = Caucasian)).
hsb2.groupby('race')['write'].mean()
#### Treatment (Dummy) Coding
# Dummy coding is likely the most well known coding scheme. It compares each level of the categorical variable to a base reference level. The base reference level is the value of the intercept. It is the default contrast in Patsy for unordered categorical factors. The Treatment contrast matrix for race would be
from patsy.contrasts import Treatment
levels = [1,2,3,4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
# Here we used `reference=0`, which implies that the first level, Hispanic, is the reference category against which the other level effects are measured. As mentioned above, the columns do not sum to zero and are thus not independent of the intercept. To be explicit, let's look at how this would encode the `race` variable.
hsb2.race.head(10)
print(contrast.matrix[hsb2.race-1, :][:20])
sm.categorical(hsb2.race.values)
# This is a bit of a trick, as the `race` category conveniently maps to zero-based indices. If it does not, this conversion happens under the hood, so this won't work in general but nonetheless is a useful exercise to fix ideas. The below illustrates the output using the three contrasts above
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
# We explicitly gave the contrast for race; however, since Treatment is the default, we could have omitted this.
#### Simple Coding
# Like Treatment Coding, Simple Coding compares each level to a fixed reference level. However, with simple coding, the intercept is the grand mean of all the levels of the factors. Patsy doesn't have the Simple contrast included, but you can easily define your own contrasts. To do so, write a class that contains a code_with_intercept and a code_without_intercept method that returns a patsy.contrast.ContrastMatrix instance
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
contr = -1./nlevels * np.ones((nlevels, nlevels-1))
contr[1:][np.diag_indices(nlevels-1)] = (nlevels-1.)/nlevels
return contr
def code_with_intercept(self, levels):
contrast = np.column_stack((np.ones(len(levels)),
self._simple_contrast(levels)))
return ContrastMatrix(contrast, _name_levels("Simp.", levels))
def code_without_intercept(self, levels):
contrast = self._simple_contrast(levels)
return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1]))
hsb2.groupby('race')['write'].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
#### Sum (Deviation) Coding
# Sum coding compares the mean of the dependent variable for a given level to the overall mean of the dependent variable over all the levels. That is, it uses contrasts between each of the first k-1 levels and level k. In this example, level 1 is compared to all the others, level 2 to all the others, and level 3 to all the others.
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
# This corresponds to a parameterization that forces all the coefficients to sum to zero. Notice that the intercept here is the grand mean where the grand mean is the mean of means of the dependent variable by each level.
hsb2.groupby('race')['write'].mean().mean()
#### Backward Difference Coding
# In backward difference coding, the mean of the dependent variable for a level is compared with the mean of the dependent variable for the prior level. This type of coding may be useful for a nominal or an ordinal variable.
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
# For example, here the coefficient on level 1 is the mean of `write` at level 2 compared with the mean at level 1. Ie.,
res.params["C(race, Diff)[D.1]"]
hsb2.groupby('race').mean()["write"][2] - hsb2.groupby('race').mean()["write"][1]
#### Helmert Coding
# Our version of Helmert coding is sometimes referred to as Reverse Helmert Coding. The mean of the dependent variable for a level is compared to the mean of the dependent variable over all previous levels. Hence, the name 'reverse' being sometimes applied to differentiate from forward Helmert coding. This comparison does not make much sense for a nominal variable such as race, but we would use the Helmert contrast like so:
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
# To illustrate, the comparison on level 4 is the mean of the dependent variable at the previous three levels taken from the mean at level 4
grouped = hsb2.groupby('race')
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
# As you can see, these are only equal up to a constant. Other versions of the Helmert contrast give the actual difference in means. Regardless, the hypothesis tests are the same.
k = 4
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
k = 3
1./k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k-1].mean())
#### Orthogonal Polynomial Coding
# The coefficients taken on by polynomial coding for `k=4` levels are the linear, quadratic, and cubic trends in the categorical variable. The categorical variable here is assumed to be represented by an underlying, equally spaced numeric variable. Therefore, this type of encoding is used only for ordered categorical variables with equal spacing. In general, the polynomial contrast produces polynomials of order `k-1`. Since `race` is not an ordered factor variable, let's use `read` as an example. First we need to create an ordered categorical from `read`.
hsb2['readcat'] = pd.cut(hsb2.read, bins=3)
hsb2.groupby('readcat').mean()['write']
from patsy.contrasts import Poly
levels = hsb2.readcat.unique().tolist()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
# As you can see, readcat has a significant linear effect on the dependent variable `write` but not a significant quadratic or cubic effect.
| bsd-3-clause |
icoxfog417/number_recognizer | tests/test_machine_loader.py | 3 | 1450 | import os
import unittest
from sklearn import datasets
from machines.machine_loader import MachineLoader
import machines.number_recognizer
class TestMachineLoader(unittest.TestCase):
def test_load(self):
machine = MachineLoader.load(machines.number_recognizer)
self.assertTrue(machine)
def test_feedback(self):
test_file = "test_feedback.txt"
feedback_file = MachineLoader.feedback(machines.number_recognizer, None, file_name=test_file)
if os.path.isfile(feedback_file):
os.remove(feedback_file)
data = [0] * 64
target = [0]
feedback = target + data
# create file
MachineLoader.feedback(machines.number_recognizer, feedback, file_name=test_file)
# append file
MachineLoader.feedback(machines.number_recognizer, feedback, file_name=test_file)
with open(feedback_file, mode="rb") as r:
lines = r.readlines()
self.assertEqual(2, len(lines))
os.remove(feedback_file)
def test_predict(self):
digits = datasets.load_digits()
from sklearn import svm
from sklearn import cross_validation
clf = svm.SVC(gamma=0.001, C=100)
clf = clf.fit(digits.data, digits.target)
cross_validation.cross_val_score(clf, digits.data[:-1], digits.target[:-1], cv=5)
predicted = clf.predict(digits.data[-1])
self.assertGreaterEqual(predicted, 0) | mit |
farmer-martin/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
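        # Each byte of completion_mask packs eight geodesic sections as bits:
        # section i*8 + j is treated as hit when bit j of byte i is set.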
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
| gpl-3.0 |
zfrenchee/pandas | pandas/io/api.py | 14 | 1146 | """
Data IO api
"""
# flake8: noqa
from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, get_store, read_hdf
from pandas.io.json import read_json
from pandas.io.html import read_html
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
from pandas.io.parquet import read_parquet
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.gbq import read_gbq
# deprecation, xref #13790
def Term(*args, **kwargs):
import warnings
warnings.warn("pd.Term is deprecated as it is not "
"applicable to user code. Instead use in-line "
"string expressions in the where clause when "
"searching in HDFStore",
FutureWarning, stacklevel=2)
from pandas.io.pytables import Term
return Term(*args, **kwargs)
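# For example, instead of building Term objects, the condition can be passed
# as a string expression (illustrative sketch; file and key names are made up):
#
#     store = HDFStore('data.h5')
#     df = store.select('df', where="index > pd.Timestamp('2013-01-01')")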
| bsd-3-clause |
yarikoptic/pystatsmodels | statsmodels/sandbox/tsa/fftarma.py | 4 | 16631 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate an ARMA sample using fft; with all the lfilter calls it looks slow
to get the MA representation first
apply the ARMA filter (in AR representation) to a time series to get white noise,
but it seems too slow to be useful for fast estimation with nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
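# Added illustrative sketch (not part of the original module): the core trick
# used by ArmaFft.filter below is that applying an ARMA(p, q) filter to x can
# be done in the frequency domain as ifft(fft(ma) / fft(ar) * fft(x)), with
# both lag polynomials zero-padded to len(x). The helper name is hypothetical.
def _fft_arma_filter_sketch(ar, ma, x):
    n = len(x)
    arpad = np.r_[ar, np.zeros(n - len(ar))]
    mapad = np.r_[ma, np.zeros(n - len(ma))]
    return fft.ifft(fft.fft(mapad) / fft.fft(arpad) * fft.fft(x)).real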
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
'''fft tools for arma processes
    This class contains several methods that provide the same or similar
    results, in order to try out and test different implementations.
Notes
-----
TODO:
check whether we don't want to fix maxlags, and create new instance if
maxlag changes. usage for different lengths of timeseries ?
or fix frequency and length for fft
check default frequencies w, terminology norw n_or_w
some ffts are currently done without padding with zeros
returns for spectral density methods needs checking, is it always the power
spectrum hw*hw.conj()
normalization of the power spectrum, spectral density: not checked yet, for
example no variance of underlying process is used
'''
def __init__(self, ar, ma, n):
#duplicates now that are subclassing ArmaProcess
super(ArmaFft, self).__init__(ar, ma)
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.nobs = n
#could make the polynomials into cached attributes
self.arpoly = np.polynomial.Polynomial(ar)
self.mapoly = np.polynomial.Polynomial(ma)
self.nar = len(ar) #1d only currently
self.nma = len(ma)
if self.nar > 1:
self.arroots = self.arpoly.roots()
else:
self.arroots = np.array([])
if self.nma > 1:
self.maroots = self.mapoly.roots()
else:
self.maroots = np.array([])
def padarr(self, arr, maxlag, atend=True):
'''pad 1d array with zeros at end to have length maxlag
function that is a method, no self used
Parameters
----------
arr : array_like, 1d
array that will be padded with zeros
maxlag : int
length of array after padding
atend : boolean
If True (default), then the zeros are added to the end, otherwise
to the front of the array
Returns
-------
arrp : ndarray
zero-padded array
Notes
-----
This is mainly written to extend coefficient arrays for the lag-polynomials.
It returns a copy.
'''
if atend:
return np.r_[arr, np.zeros(maxlag-len(arr))]
else:
return np.r_[np.zeros(maxlag-len(arr)), arr]
def pad(self, maxlag):
'''construct AR and MA polynomials that are zero-padded to a common length
Parameters
----------
maxlag : int
new length of lag-polynomials
Returns
-------
ar : ndarray
extended AR polynomial coefficients
ma : ndarray
extended AR polynomial coefficients
'''
arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
return arpad, mapad
def fftar(self, n=None):
'''Fourier transform of AR polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ar, n))
def fftma(self, n):
'''Fourier transform of MA polynomial, zero-padded at end to n
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftar : ndarray
fft of zero-padded ar polynomial
'''
if n is None:
n = len(self.ar)
return fft.fft(self.padarr(self.ma, n))
#@OneTimeProperty # not while still debugging things
def fftarma(self, n=None):
'''Fourier transform of ARMA polynomial, zero-padded at end to n
The Fourier transform of the ARMA process is calculated as the ratio
of the fft of the MA polynomial divided by the fft of the AR polynomial.
Parameters
----------
n : int
length of array after zero-padding
Returns
-------
fftarma : ndarray
fft of zero-padded arma polynomial
'''
if n is None:
n = self.nobs
return (self.fftma(n) / self.fftar(n))
def spd(self, npos):
'''raw spectral density, returns Fourier transform
n is number of points in positive spectrum, the actual number of points
is twice as large. different from other spd methods with fft
'''
n = npos
w = fft.fftfreq(2*n) * 2 * np.pi
hw = self.fftarma(2*n) #not sure, need to check normalization
#return (hw*hw.conj()).real[n//2-1:] * 0.5 / np.pi #doesn't show in plot
return (hw*hw.conj()).real * 0.5 / np.pi, w
def spdshift(self, n):
'''power spectral density using fftshift
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
mapadded = self.padarr(self.ma, n)
arpadded = self.padarr(self.ar, n)
hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
#return np.abs(spd)[n//2-1:]
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(n//2-1, None, None)
#return (hw*hw.conj()).real[wslice], w[wslice]
return (hw*hw.conj()).real, w
def spddirect(self, n):
'''power spectral density using padding to length n done by fft
currently returns two-sided according to fft frequencies, use first half
'''
#size = s1+s2-1
#abs looks wrong
hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
w = fft.fftfreq(n) * 2 * np.pi
wslice = slice(None, n//2, None)
#return (np.abs(hw)**2)[wslice], w[wslice]
return (np.abs(hw)**2) * 0.5/np.pi, w
def _spddirect2(self, n):
'''this looks bad, maybe with an fftshift
'''
#size = s1+s2-1
hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
/ fft.fft(np.r_[self.ar[::-1],self.ar], n))
return (hw*hw.conj()) #.real[n//2-1:]
def spdroots(self, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
'''
return self.spdroots_(self.arroots, self.maroots, w)
def spdroots_(self, arroots, maroots, w):
'''spectral density for frequency using polynomial roots
builds two arrays (number of roots, number of frequencies)
Parameters
----------
arroots : ndarray
roots of ar (denominator) lag-polynomial
maroots : ndarray
roots of ma (numerator) lag-polynomial
w : array_like
frequencies for which spd is calculated
Notes
-----
this should go into a function
'''
w = np.atleast_2d(w).T
cosw = np.cos(w)
#Greene 5th edt. p626, section 20.2.7.a.
maroots = 1./maroots
arroots = 1./arroots
num = 1 + maroots**2 - 2* maroots * cosw
den = 1 + arroots**2 - 2* arroots * cosw
#print 'num.shape, den.shape', num.shape, den.shape
hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1) #or use expsumlog
return np.squeeze(hw), w.squeeze()
def spdpoly(self, w, nma=50):
'''spectral density from MA polynomial representation for ARMA process
References
----------
Cochrane, section 8.3.3
'''
mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
hw = mpoly(np.exp(1j * w))
spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
return spd, w
def filter(self, x):
'''
filter a timeseries with the ARMA filter
padding with zero is missing, in example I needed the padding to get
initial conditions identical to direct filter
Initial filtered observations differ from filter2 and signal.lfilter, but
at end they are the same.
See Also
--------
tsa.filters.fftconvolve
'''
n = x.shape[0]
if n == self.fftarma:
fftarma = self.fftarma
else:
fftarma = self.fftma(n) / self.fftar(n)
tmpfft = fftarma * fft.fft(x)
return fft.ifft(tmpfft)
def filter2(self, x, pad=0):
'''filter a time series using fftconvolve3 with ARMA filter
padding of x currently works only if x is 1d
in example it produces same observations at beginning as lfilter even
without padding.
TODO: this returns 1 additional observation at the end
'''
from statsmodels.tsa.filters import fftconvolve3
if not pad:
pass
elif pad == 'auto':
#just guessing how much padding
x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
else:
x = self.padarr(x, x.shape[0] + int(pad), atend=False)
return fftconvolve3(x, self.ma, self.ar)
def acf2spdfreq(self, acovf, nfreq=100, w=None):
'''
not really a method
just for comparison, not efficient for large n or long acf
        this is also used similarly in tsa.stattools.periodogram with a window
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)[:, None]
nac = len(acovf)
hw = 0.5 / np.pi * (acovf[0] +
2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
return hw
def invpowerspd(self, n):
'''autocovariance from spectral density
scaling is correct, but n needs to be large for numerical accuracy
maybe padding with zero in fft would be faster
without slicing it returns 2-sided autocovariance with fftshift
>>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
>>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
array([ 2.08 , 1.44 , 0.72 , 0.36 , 0.18 , 0.09 ,
0.045 , 0.0225 , 0.01125 , 0.005625])
'''
hw = self.fftarma(n)
return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]
def spdmapoly(self, w, twosided=False):
'''ma only, need division for ar, use LagPolynomial
'''
if w is None:
w = np.linspace(0, np.pi, nfreq)
return 0.5 / np.pi * self.mapoly(np.exp(w*1j))
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
rvs = self.generate_sample(size=100, burnin=500)
acf = self.acf(nacf)[:nacf] #TODO: check return length
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq)
spdr, wr = self.spdroots(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
ax.plot(rvs)
ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,3)
ax.plot(wr, spdr)
ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))
return fig
def spdar1(ar, w):
if np.ndim(ar) == 0:
rho = ar
else:
rho = -ar[1]
return 0.5 / np.pi /(1 + rho*rho - 2 * rho * np.cos(w))
if __name__ == '__main__':
def maxabs(x,y):
return np.max(np.abs(x-y))
nobs = 200 #10000
ar = [1, 0.0]
ma = [1, 0.0]
ar2 = np.zeros(nobs)
ar2[:2] = [1, -0.9]
uni = np.zeros(nobs)
uni[0]=1.
#arrep = signal.lfilter(ma, ar, ar2)
#marep = signal.lfilter([1],arrep, uni)
# same faster:
arcomb = np.convolve(ar, ar2, mode='same')
marep = signal.lfilter(ma,arcomb, uni) #[len(ma):]
print marep[:10]
mafr = fft.fft(marep)
rvs = np.random.normal(size=nobs)
datafr = fft.fft(rvs)
y = fft.ifft(mafr*datafr)
print np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]],rowvar=0)
arrep = signal.lfilter([1],marep, uni)
print arrep[:20] # roundtrip to ar
arfr = fft.fft(arrep)
yfr = fft.fft(y)
x = fft.ifft(arfr*yfr).real #imag part is e-15
# the next two are equal, roundtrip works
print x[:5]
print rvs[:5]
print np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]],rowvar=0)
# ARMA filter using fft with ratio of fft of ma/ar lag polynomial
# seems much faster than using lfilter
#padding, note arcomb is already full length
arcombp = np.zeros(nobs)
arcombp[:len(arcomb)] = arcomb
map_ = np.zeros(nobs) #rename: map was shadowing builtin
map_[:len(ma)] = ma
ar0fr = fft.fft(arcombp)
ma0fr = fft.fft(map_)
y2 = fft.ifft(ma0fr/ar0fr*datafr)
#the next two are (almost) equal in real part, almost zero but different in imag
print y2[:10]
print y[:10]
print maxabs(y, y2) # from chfdiscrete
#1.1282071239631782e-014
ar = [1, -0.4]
ma = [1, 0.2]
arma1 = ArmaFft([1, -0.5,0,0,0,00, -0.7, 0.3], [1, 0.8], nobs)
nfreq = nobs
w = np.linspace(0, np.pi, nfreq)
w2 = np.linspace(0, 2*np.pi, nfreq)
import matplotlib.pyplot as plt
plt.close('all')
plt.figure()
spd1, w1 = arma1.spd(2**10)
print spd1.shape
_ = plt.plot(spd1)
plt.title('spd fft complex')
plt.figure()
spd2, w2 = arma1.spdshift(2**10)
print spd2.shape
_ = plt.plot(w2, spd2)
plt.title('spd fft shift')
plt.figure()
spd3, w3 = arma1.spddirect(2**10)
print spd3.shape
_ = plt.plot(w3, spd3)
plt.title('spd fft direct')
plt.figure()
spd3b = arma1._spddirect2(2**10)
print spd3b.shape
_ = plt.plot(spd3b)
plt.title('spd fft direct mirrored')
plt.figure()
spdr, wr = arma1.spdroots(w)
print spdr.shape
plt.plot(w, spdr)
plt.title('spd from roots')
plt.figure()
spdar1_ = spdar1(arma1.ar, w)
print spdar1_.shape
_ = plt.plot(w, spdar1_)
plt.title('spd ar1')
plt.figure()
wper, spdper = arma1.periodogram(nfreq)
print spdper.shape
_ = plt.plot(w, spdper)
plt.title('periodogram')
startup = 1000
rvs = arma1.generate_sample(startup+10000)[startup:]
import matplotlib.mlab as mlb
plt.figure()
    sdm, wm = mlb.psd(rvs)
print 'sdm.shape', sdm.shape
sdm = sdm.ravel()
plt.plot(wm, sdm)
plt.title('matplotlib')
from nitime.algorithms import LD_AR_est
#yule_AR_est(s, order, Nfreqs)
wnt, spdnt = LD_AR_est(rvs, 10, 512)
plt.figure()
print 'spdnt.shape', spdnt.shape
_ = plt.plot(spdnt.ravel())
print spdnt[:10]
plt.title('nitime')
fig = plt.figure()
arma1.plot4(fig)
#plt.show()
| bsd-3-clause |
statkclee/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
cbertinato/pandas | pandas/tests/indexes/multi/test_constructor.py | 1 | 22051 | from collections import OrderedDict
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import Index, MultiIndex, date_range
import pandas.util.testing as tm
def test_constructor_single_level():
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
codes=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels():
msg = "non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=[], codes=[])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=[])
with pytest.raises(TypeError, match=msg):
MultiIndex(codes=[])
def test_constructor_nonhashable_names():
# GH 20527
levels = [[1, 2], ['one', 'two']]
codes = [[0, 0, 1, 1], [0, 1, 0, 1]]
names = (['foo'], ['bar'])
msg = r"MultiIndex\.name must be a hashable type"
with pytest.raises(TypeError, match=msg):
MultiIndex(levels=levels, codes=codes, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], ['one', 'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
with pytest.raises(TypeError, match=msg):
mi.rename(names=renamed)
# With .set_names()
with pytest.raises(TypeError, match=msg):
mi.set_names(names=renamed)
def test_constructor_mismatched_codes_levels(idx):
codes = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
msg = "Length of levels and codes must be the same"
with pytest.raises(ValueError, match=msg):
MultiIndex(levels=levels, codes=codes)
length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\."
" NOTE: this index is in an inconsistent state")
label_error = r"Unequal code lengths: \[4, 2\]"
code_value_error = r"On level 0, code value \(-2\) < -1"
# important to check that it's looking at the right thing.
with pytest.raises(ValueError, match=length_error):
MultiIndex(levels=[['a'], ['b']],
codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
with pytest.raises(ValueError, match=label_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, 0, 0, 0], [0, 0]])
# external API
with pytest.raises(ValueError, match=length_error):
idx.copy().set_levels([['a'], ['b']])
with pytest.raises(ValueError, match=label_error):
idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
# test set_codes with verify_integrity=False
# the setting should not raise any value error
idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]],
verify_integrity=False)
# code value smaller than -1
with pytest.raises(ValueError, match=code_value_error):
MultiIndex(levels=[['a'], ['b']], codes=[[0, -2], [0, 0]])
def test_na_levels():
# GH26408
# test if codes are re-assigned value -1 for levels
    # with missing values (NaN, NaT, None)
result = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, None, pd.NaT, 128, 2]],
codes=[[-1, -1, -1, -1, 3, 4]])
tm.assert_index_equal(result, expected)
result = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[0, -1, 1, 2, 3, 4]])
expected = MultiIndex(levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[-1, -1, 1, -1, 3, -1]])
tm.assert_index_equal(result, expected)
# verify set_levels and set_codes
result = MultiIndex(
levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]).set_levels(
[[np.nan, 's', pd.NaT, 128, None]])
tm.assert_index_equal(result, expected)
result = MultiIndex(
levels=[[np.nan, 's', pd.NaT, 128, None]],
codes=[[1, 2, 2, 2, 2, 2]]).set_codes(
[[0, -1, 1, 2, 3, 4]])
tm.assert_index_equal(result, expected)
def test_labels_deprecated(idx):
# GH23752
with tm.assert_produces_warning(FutureWarning):
MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
with tm.assert_produces_warning(FutureWarning):
idx.labels
def test_copy_in_constructor():
levels = np.array(["a", "b", "c"])
codes = np.array([1, 1, 2, 0, 0, 1, 1])
val = codes[0]
mi = MultiIndex(levels=[levels, levels], codes=[codes, codes],
copy=True)
assert mi.codes[0][0] == val
codes[0] = 15
assert mi.codes[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(idx):
# GH 18434
arrays = [np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)]
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=idx.names)
tm.assert_index_equal(result, idx)
# invalid iterator input
msg = "Input must be a list / sequence of array-likes."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes))
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
def test_from_arrays_index_series_datetimetz():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta():
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period():
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed():
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical():
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, codes=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_sequence_of_arrays', [
1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'],
(1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'),
[(1,), 2], [1, (2,)], [('a',), 'b'],
((1,), 2), (1, (2,)), (('a',), 'b')
])
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
msg = "Input must be a list / sequence of array-likes"
with pytest.raises(TypeError, match=msg):
MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize('idx1, idx2', [
([1, 2, 3], ['a', 'b']),
([], ['a', 'b']),
([1, 2, 3], [])
])
def test_from_arrays_different_lengths(idx1, idx2):
# see gh-13599
msg = '^all arrays must be same length$'
with pytest.raises(ValueError, match=msg):
MultiIndex.from_arrays([idx1, idx2])
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
msg = 'Cannot infer number of levels from empty list'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples([])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
codes=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
msg = 'Input must be a list / sequence of tuple-likes.'
with pytest.raises(TypeError, match=msg):
MultiIndex.from_tuples(0)
def test_from_tuples_empty():
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
# GH 15110 and GH 14848
li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='abc')
with pytest.raises(ValueError, match=msg):
pd.Index(li, name='a')
def test_from_tuples_with_tuple_label():
# GH 15457
expected = pd.DataFrame([[2, 1, 2], [4, (1, 2), 3]],
columns=['a', 'b', 'c']).set_index(['a', 'b'])
idx = pd.MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=('a', 'b'))
result = pd.DataFrame([2, 3], columns=['c'], index=idx)
tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
# 0 levels
msg = "Must pass non-zero number of levels/codes"
with pytest.raises(ValueError, match=msg):
MultiIndex.from_product([])
def test_from_product_empty_one_level():
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
@pytest.mark.parametrize('first, second', [
([], []),
(['foo', 'bar', 'baz'], []),
([], ['a', 'b', 'c']),
])
def test_from_product_empty_two_levels(first, second):
names = ['A', 'B']
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
codes=[[], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('N', list(range(4)))
def test_from_product_empty_three_levels(N):
# GH12258
names = ['A', 'B', 'C']
lvl2 = list(range(N))
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
codes=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('invalid_input', [
1,
[1],
[1, 2],
[[1], 2],
'a',
['a'],
['a', 'b'],
[['a'], 'b'],
])
def test_from_product_invalid_input(invalid_input):
msg = (r"Input must be a list / sequence of iterables|"
"Input must be list-like")
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
etalon = construct_1d_object_array_from_listlike([
(1, pd.Timestamp('2000-01-01')),
(1, pd.Timestamp('2000-01-02')),
(2, pd.Timestamp('2000-01-01')),
(2, pd.Timestamp('2000-01-02')),
])
tm.assert_numpy_array_equal(mi.values, etalon)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('f', [
lambda x: x,
lambda x: pd.Series(x),
lambda x: x.values
])
def test_from_product_index_series_categorical(ordered, f):
# GH13743
first = ['foo', 'bar']
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
result = pd.MultiIndex.from_product([first, f(idx)])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator():
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
msg = "Input must be a list / sequence of iterables."
with pytest.raises(TypeError, match=msg):
MultiIndex.from_product(0)
def test_create_index_existing_name(idx):
# GH11193, when an existing index is passed, and a new name is not
# specified, the new index should inherit the previous object name
index = idx
index.names = ['foo', 'bar']
result = pd.Index(index)
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['foo', 'bar']
)
tm.assert_index_equal(result, expected)
result = pd.Index(index, names=['A', 'B'])
expected = Index(
Index([
('foo', 'one'), ('foo', 'two'),
('bar', 'one'), ('baz', 'two'),
('qux', 'one'), ('qux', 'two')],
dtype='object'
),
names=['A', 'B']
)
tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_frame
# ----------------------------------------------------------------------------
def test_from_frame():
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=['L1', 'L2'])
expected = pd.MultiIndex.from_tuples([('a', 'a'), ('a', 'b'),
('b', 'a'), ('b', 'b')],
names=['L1', 'L2'])
result = pd.MultiIndex.from_frame(df)
tm.assert_index_equal(expected, result)
@pytest.mark.parametrize('non_frame', [
pd.Series([1, 2, 3, 4]),
[1, 2, 3, 4],
[[1, 2], [3, 4], [5, 6]],
pd.Index([1, 2, 3, 4]),
np.array([[1, 2], [3, 4], [5, 6]]),
27
])
def test_from_frame_error(non_frame):
# GH 22420
with pytest.raises(TypeError, match='Input must be a DataFrame'):
pd.MultiIndex.from_frame(non_frame)
def test_from_frame_dtype_fidelity():
# GH 22420
df = pd.DataFrame(OrderedDict([
('dates', pd.date_range('19910905', periods=6, tz='US/Eastern')),
('a', [1, 1, 1, 2, 2, 2]),
('b', pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True)),
('c', ['x', 'x', 'y', 'z', 'x', 'y'])
]))
original_dtypes = df.dtypes.to_dict()
expected_mi = pd.MultiIndex.from_arrays([
pd.date_range('19910905', periods=6, tz='US/Eastern'),
[1, 1, 1, 2, 2, 2],
pd.Categorical(['a', 'a', 'b', 'b', 'c', 'c'], ordered=True),
['x', 'x', 'y', 'z', 'x', 'y']
], names=['dates', 'a', 'b', 'c'])
mi = pd.MultiIndex.from_frame(df)
mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}
tm.assert_index_equal(expected_mi, mi)
assert original_dtypes == mi_dtypes
@pytest.mark.parametrize('names_in,names_out', [
(None, [('L1', 'x'), ('L2', 'y')]),
(['x', 'y'], ['x', 'y']),
])
def test_from_frame_valid_names(names_in, names_out):
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=pd.MultiIndex.from_tuples([('L1', 'x'),
('L2', 'y')]))
mi = pd.MultiIndex.from_frame(df, names=names_in)
assert mi.names == names_out
@pytest.mark.parametrize('names,expected_error_msg', [
('bad_input', "Names should be list-like for a MultiIndex"),
(['a', 'b', 'c'],
"Length of names must match number of levels in MultiIndex")
])
def test_from_frame_invalid_names(names, expected_error_msg):
# GH 22420
df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']],
columns=pd.MultiIndex.from_tuples([('L1', 'x'),
('L2', 'y')]))
with pytest.raises(ValueError, match=expected_error_msg):
pd.MultiIndex.from_frame(df, names=names)
| bsd-3-clause |
zednis/rpinfo2 | ingest/ingest.py | 1 | 4942 | import pandas
import json
from bs4 import BeautifulSoup
import re
courses_csv = "courses-list-2016-04-24_20.25.23.xlsx"
programs_csv = "programs-list-2016-04-24_20.27.21.xlsx"
def get_metadata(id):
return {"index": {"_index": "rpinfo2", "_type": "course", "_id": id}}
def get_course_level(code):
digit = int(code[0])
if digit >= 5:
return "Graduate"
else:
return "Undergraduate"
def get_courses():
df = pandas.read_excel("resources/" + courses_csv, sheetname='Worksheet')
courses = []
when_offered = set()
credit_hours = set()
prefixes = set()
for index, row in df.iterrows():
if row["Name"] is not pandas.np.nan:
_prefixes = row["Prefix"].strip().upper().split(" OR ")
_code = row["Code"]
for prefix in _prefixes:
prefixes.add(prefix)
_id = prefix + "-" + _code
subject_code = prefix + " " + _code
course = {"id": _id,
"title": row["Name"],
"prefix": prefix,
"code": _code,
"subjectCode": subject_code,
"level": get_course_level(_code)}
if row["Department Name"] is not pandas.np.nan:
department = {"name": row["Department Name"]}
course["department"] = department
if row["School/College Name"] is not pandas.np.nan:
school = {"name": row["School/College Name"]}
course["school"] = school
# TODO program?
if row["Course Type"] is not pandas.np.nan:
course["courseType"] = row["Course Type"].strip()
if row["Description (Rendered no HTML)"] is not pandas.np.nan:
course["description"] = row["Description (Rendered no HTML)"]
if row["When Offered:"] is not pandas.np.nan:
course["whenOffered"] = row["When Offered:"].strip()
when_offered.add(row["When Offered:"].strip())
if row["Credit Hours:"] is not pandas.np.nan:
course["creditHours"] = row["Credit Hours:"].strip()
credit_hours.add(row["Credit Hours:"].strip())
if row["Prerequisites/Corequisites: (Rendered no HTML)"] is not pandas.np.nan:
# TODO parse prerequisites/corequisites text
course["prerequisites_corequisites"] = row["Prerequisites/Corequisites: (Rendered no HTML)"]
if row["Cross Listed:"] is not pandas.np.nan:
# TODO parse crosslisted text
course["crossListed"] = row["Cross Listed:"]
courses.append(course)
# print(when_offered)
# print(len(when_offered))
# print(credit_hours)
# print(prefixes)
# print(len(prefixes))
return courses
def get_programs():
df = pandas.read_excel("resources/" + programs_csv, sheetname='Worksheet')
programs = []
for index, row in df.iterrows():
program = {"name": row["Program Name"].strip()}
if row["Degree Type"] is not pandas.np.nan:
program["degree_type"] = row["Degree Type"].strip()
if row["Program Type"] is not pandas.np.nan:
program["program_type"] = row["Program Type"].strip()
if row["Entity Name"] is not pandas.np.nan:
program["department"] = row["Entity Name"].strip()
if row["Cores"] is not pandas.np.nan:
cores = get_program_core_courses(row["Cores"].strip())
program["core_courses"] = list(cores)
programs.append(program)
return programs
def get_program_core_courses(html):
core_courses = set()
soup = BeautifulSoup(html, 'html.parser')
    pattern = re.compile(r"[A-Z]{4}\s[0-9]{4}")
for line_item in soup.find_all('li'):
text = line_item.text
for match in pattern.findall(text):
core_courses.add(match)
return core_courses
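# Illustrative usage (hypothetical HTML, not from the source spreadsheet): the
# regex above extracts "AAAA 0000"-style subject codes from each <li>, e.g.
#   get_program_core_courses("<ul><li>CSCI 1100 - Intro</li></ul>")
#   # -> {'CSCI 1100'}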
def main():
courses = get_courses()
programs = get_programs()
for program in programs:
for core_course in program["core_courses"]:
matches = [_course for _course in courses if _course["subjectCode"] == core_course]
for course in matches:
if "core_for" not in course:
course["core_for"] = []
course["core_for"].append(program)
data = []
for course in courses:
_id = course["id"]
data.append(json.dumps(get_metadata(_id)))
data.append(json.dumps(course))
with open("out.bulk", "w") as bulk_file:
bulk_file.write('\n'.join(data)+'\n')
# print(json.dumps(get_programs()))
# print(get_program_core_courses(open("resources/CompSciCores.html")))
if __name__ == "__main__":
main()
| gpl-3.0 |
ibis-project/ibis | ibis/backends/postgres/tests/test_client.py | 1 | 7118 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pandas as pd
import pytest
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis.backends.base.sql.alchemy import schema_from_table
from ibis.tests.util import assert_equal
pytestmark = pytest.mark.postgres
POSTGRES_TEST_DB = os.environ.get(
'IBIS_TEST_POSTGRES_DATABASE', 'ibis_testing'
)
IBIS_POSTGRES_HOST = os.environ.get('IBIS_TEST_POSTGRES_HOST', 'localhost')
IBIS_POSTGRES_USER = os.environ.get('IBIS_TEST_POSTGRES_USER', 'postgres')
IBIS_POSTGRES_PASS = os.environ.get('IBIS_TEST_POSTGRES_PASSWORD', 'postgres')
def test_table(alltypes):
assert isinstance(alltypes, ir.TableExpr)
def test_array_execute(alltypes):
d = alltypes.limit(10).double_col
s = d.execute()
assert isinstance(s, pd.Series)
assert len(s) == 10
def test_literal_execute(con):
expr = ibis.literal('1234')
result = con.execute(expr)
assert result == '1234'
def test_simple_aggregate_execute(alltypes):
d = alltypes.double_col.sum()
v = d.execute()
assert isinstance(v, float)
def test_list_tables(con):
assert len(con.list_tables()) > 0
assert len(con.list_tables(like='functional')) == 1
def test_compile_verify(alltypes):
unsupported_expr = alltypes.double_col.approx_median()
assert not unsupported_expr.verify()
supported_expr = alltypes.double_col.sum()
assert supported_expr.verify()
def test_database_layer(con, alltypes):
db = con.database()
t = db.functional_alltypes
assert_equal(t, alltypes)
assert db.list_tables() == con.list_tables()
db_schema = con.schema("information_schema")
assert db_schema.list_tables() != con.list_tables()
def test_compile_toplevel():
t = ibis.table([('foo', 'double')], name='t0')
# it works!
expr = t.foo.sum()
result = ibis.postgres.compile(expr)
expected = "SELECT sum(t0.foo) AS sum \nFROM t0 AS t0" # noqa
assert str(result) == expected
def test_list_databases(con):
assert POSTGRES_TEST_DB is not None
assert POSTGRES_TEST_DB in con.list_databases()
def test_list_schemas(con):
assert 'public' in con.list_schemas()
assert 'information_schema' in con.list_schemas()
def test_metadata_is_per_table():
con = ibis.postgres.connect(
host=IBIS_POSTGRES_HOST,
database=POSTGRES_TEST_DB,
user=IBIS_POSTGRES_USER,
password=IBIS_POSTGRES_PASS,
)
assert len(con.meta.tables) == 0
# assert that we reflect only when a table is requested
t = con.table('functional_alltypes') # noqa
assert 'functional_alltypes' in con.meta.tables
assert len(con.meta.tables) == 1
def test_schema_table():
con = ibis.postgres.connect(
host=IBIS_POSTGRES_HOST,
database=POSTGRES_TEST_DB,
user=IBIS_POSTGRES_USER,
password=IBIS_POSTGRES_PASS,
)
# ensure that we can reflect the information schema (which is guaranteed
# to exist)
schema = con.schema('information_schema')
assert isinstance(schema['tables'], ir.TableExpr)
def test_schema_type_conversion():
typespec = [
# name, type, nullable
('json', postgresql.JSON, True, dt.JSON),
('jsonb', postgresql.JSONB, True, dt.JSONB),
('uuid', postgresql.UUID, True, dt.UUID),
('macaddr', postgresql.MACADDR, True, dt.MACADDR),
('inet', postgresql.INET, True, dt.INET),
]
sqla_types = []
ibis_types = []
for name, t, nullable, ibis_type in typespec:
sqla_type = sa.Column(name, t, nullable=nullable)
sqla_types.append(sqla_type)
ibis_types.append((name, ibis_type(nullable=nullable)))
# Create a table with placeholder stubs for JSON, JSONB, and UUID.
engine = sa.create_engine('postgresql://')
table = sa.Table('tname', sa.MetaData(bind=engine), *sqla_types)
# Check that we can correctly create a schema with dt.any for the
# missing types.
schema = schema_from_table(table)
expected = ibis.schema(ibis_types)
assert_equal(schema, expected)
def test_interval_films_schema(con):
t = con.table("films")
assert t.len.type() == dt.Interval(unit="m")
assert t.len.execute().dtype == np.dtype("timedelta64[ns]")
@pytest.mark.parametrize(
("column", "expected_dtype"),
[
# ("a", dt.Interval("Y")),
# ("b", dt.Interval("M")),
("c", dt.Interval("D")),
("d", dt.Interval("h")),
("e", dt.Interval("m")),
("f", dt.Interval("s")),
# ("g", dt.Interval("M")),
("h", dt.Interval("h")),
("i", dt.Interval("m")),
("j", dt.Interval("s")),
("k", dt.Interval("m")),
("l", dt.Interval("s")),
("m", dt.Interval("s")),
],
)
def test_all_interval_types_schema(intervals, column, expected_dtype):
assert intervals[column].type() == expected_dtype
@pytest.mark.parametrize(
("column", "expected_dtype"),
[
# ("a", dt.Interval("Y")),
# ("b", dt.Interval("M")),
("c", dt.Interval("D")),
("d", dt.Interval("h")),
("e", dt.Interval("m")),
("f", dt.Interval("s")),
# ("g", dt.Interval("M")),
("h", dt.Interval("h")),
("i", dt.Interval("m")),
("j", dt.Interval("s")),
("k", dt.Interval("m")),
("l", dt.Interval("s")),
("m", dt.Interval("s")),
],
)
def test_all_interval_types_execute(intervals, column, expected_dtype):
expr = intervals[column]
series = expr.execute()
assert series.dtype == np.dtype("timedelta64[ns]")
@pytest.mark.xfail(
raises=ValueError, reason="Year and month interval types not yet supported"
)
def test_unsupported_intervals(con):
t = con.table("not_supported_intervals")
assert t["a"].type() == dt.Interval("Y")
assert t["b"].type() == dt.Interval("M")
assert t["g"].type() == dt.Interval("M")
@pytest.mark.parametrize('params', [{}, {'database': POSTGRES_TEST_DB}])
def test_create_and_drop_table(con, temp_table, params):
sch = ibis.schema(
[
('first_name', 'string'),
('last_name', 'string'),
('department_name', 'string'),
('salary', 'float64'),
]
)
con.create_table(temp_table, schema=sch, **params)
assert con.table(temp_table, **params) is not None
con.drop_table(temp_table, **params)
with pytest.raises(sa.exc.NoSuchTableError):
con.table(temp_table, **params)
| apache-2.0 |
datapythonista/pandas | pandas/tests/frame/indexing/test_getitem.py | 2 | 11073 | import re
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
def test_getitem_list_duplicates(self):
# GH#1943
df = DataFrame(np.random.randn(4, 4), columns=list("AABC"))
df.columns.name = "foo"
result = df[["B", "C"]]
assert result.columns.name == "foo"
expected = df.iloc[:, 2:]
tm.assert_frame_equal(result, expected)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "a", "b"])
msg = "\"None of [Index(['baf'], dtype='object')] are in the [columns]\""
with pytest.raises(KeyError, match=re.escape(msg)):
df[["baf"]]
@pytest.mark.parametrize(
"idx_type",
[
list,
iter,
Index,
set,
lambda l: dict(zip(l, range(len(l)))),
lambda l: dict(zip(l, range(len(l)))).keys(),
],
ids=["list", "iter", "Index", "set", "dict", "dict_keys"],
)
@pytest.mark.parametrize("levels", [1, 2])
def test_getitem_listlike(self, idx_type, levels, float_frame):
# GH#21294
if levels == 1:
frame, missing = float_frame, "food"
else:
# MultiIndex columns
frame = DataFrame(
np.random.randn(8, 3),
columns=Index(
[("foo", "bar"), ("baz", "qux"), ("peek", "aboo")],
name=("sth", "sth2"),
),
)
missing = ("good", "food")
keys = [frame.columns[1], frame.columns[0]]
idx = idx_type(keys)
idx_check = list(idx_type(keys))
result = frame[idx]
expected = frame.loc[:, idx_check]
expected.columns.names = frame.columns.names
tm.assert_frame_equal(result, expected)
idx = idx_type(keys + [missing])
with pytest.raises(KeyError, match="not in index"):
frame[idx]
def test_getitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
result = df.iloc[indexer]
expected = DataFrame({"a": [2, 3], "b": [5, 6]}, index=[1, 2])
tm.assert_frame_equal(result, expected)
def test_getitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
result = df.iloc[indexer, 1]
expected = Series([5, 6], name="b", index=[1, 2])
tm.assert_series_equal(result, expected)
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
        tm.assert_frame_equal(result, expected)
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
@pytest.mark.parametrize(
"data1,data2,expected_data",
(
(
[[1, 2], [3, 4]],
[[0.5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [np.nan, 7.0], [6.0, 8.0]],
),
(
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[np.nan, 3.0], [np.nan, 4.0], [5, 7], [6, 8]],
),
),
)
def test_getitem_bool_mask_duplicate_columns_mixed_dtypes(
self,
data1,
data2,
expected_data,
):
# GH#31954
df1 = DataFrame(np.array(data1))
df2 = DataFrame(np.array(data2))
df = concat([df1, df2], axis=1)
result = df[df > 2]
exdict = {i: np.array(col) for i, col in enumerate(expected_data)}
expected = DataFrame(exdict).rename(columns={2: 0, 3: 1})
tm.assert_frame_equal(result, expected)
@pytest.fixture
def df_dup_cols(self):
dups = ["A", "A", "C", "D"]
df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype="float64")
return df
def test_getitem_boolean_frame_unaligned_with_duplicate_columns(self, df_dup_cols):
# `df.A > 6` is a DataFrame with a different shape from df
# boolean with the duplicate raises
df = df_dup_cols
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df[df.A > 6]
def test_getitem_boolean_series_with_duplicate_columns(self, df_dup_cols):
# boolean indexing
# GH#4879
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
expected = df[df.C > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df.C > 6]
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
def test_getitem_boolean_frame_with_duplicate_columns(self, df_dup_cols):
# where
df = DataFrame(
np.arange(12).reshape(3, 4), columns=["A", "B", "C", "D"], dtype="float64"
)
# `df > 6` is a DataFrame with the same shape+alignment as df
expected = df[df > 6]
expected.columns = df_dup_cols.columns
df = df_dup_cols
result = df[df > 6]
tm.assert_frame_equal(result, expected)
result.dtypes
str(result)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue GH#11859
df = DataFrame()
df2 = df[df > 0]
tm.assert_frame_equal(df, df2)
class TestGetitemSlice:
def test_getitem_slice_float64(self, frame_or_series):
values = np.arange(10.0, 50.0, 2)
index = Index(values)
start, end = values[[5, 15]]
data = np.random.randn(20, 3)
if frame_or_series is not DataFrame:
data = data[:, 0]
obj = frame_or_series(data, index=index)
result = obj[start:end]
expected = obj.iloc[5:16]
tm.assert_equal(result, expected)
result = obj.loc[start:end]
tm.assert_equal(result, expected)
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/core/nanops.py | 9 | 23144 | import itertools
import functools
import numpy as np
try:
import bottleneck as bn
_USE_BOTTLENECK = True
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False
import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.compat import builtins
from pandas.core.common import (isnull, notnull, _values_from_object,
_maybe_upcast_putmask,
ensure_float, _ensure_float64,
_ensure_int64, _ensure_object,
is_float, is_integer, is_complex,
is_float_dtype,
is_complex_dtype, is_integer_dtype,
is_bool_dtype, is_object_dtype,
is_datetime64_dtype, is_timedelta64_dtype,
is_datetime_or_timedelta_dtype, _get_dtype,
is_int_or_datetime_dtype, is_any_int_dtype,
_int64_max)
class disallow(object):
def __init__(self, *dtypes):
super(disallow, self).__init__()
self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, 'dtype') and issubclass(obj.dtype.type,
self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, compat.itervalues(kwargs))
if any(self.check(obj) for obj in obj_iter):
raise TypeError('reduction operation {0!r} not allowed for '
'this dtype'.format(f.__name__.replace('nan',
'')))
try:
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e)
raise
return _f
class bottleneck_switch(object):
def __init__(self, zero_value=None, **kwargs):
self.zero_value = zero_value
self.kwargs = kwargs
def __call__(self, alt):
bn_name = alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in compat.iteritems(self.kwargs):
if k not in kwds:
kwds[k] = v
try:
if self.zero_value is not None and values.size == 0:
if values.ndim == 1:
# wrap the 0's if needed
if is_timedelta64_dtype(values):
return lib.Timedelta(0)
return 0
else:
result_shape = (values.shape[:axis] +
values.shape[axis + 1:])
result = np.empty(result_shape)
result.fill(0)
return result
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype,
bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
try:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(values):
raise TypeError(e)
raise
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if (not is_object_dtype(dt) and
not is_datetime_or_timedelta_dtype(dt)):
# bottleneck does not properly upcast during the sum
# so can overflow
if name == 'nansum':
if dt.itemsize < 8:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == 'f8':
return lib.has_infs_f8(result.ravel())
elif result.dtype == 'f4':
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError) as e:
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == '+inf':
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslib.iNaT
else:
if fill_value_typ == '+inf':
# need the max int here
return _int64_max
else:
return tslib.iNaT
def _get_values(values, skipna, fill_value=None, fill_value_typ=None,
isfinite=False, copy=True):
""" utility to get the values view, mask, dtype
if necessary copy and mask using the specified fill_value
copy = True will force the copy """
values = _values_from_object(values)
if isfinite:
mask = _isfinite(values)
else:
mask = isnull(values)
dtype = values.dtype
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(dtype, fill_value=fill_value,
fill_value_typ=fill_value_typ)
if skipna:
if copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = _maybe_upcast_putmask(values, mask, fill_value)
elif copy:
values = values.copy()
values = _view_if_needed(values)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isnull(values)
if (is_complex_dtype(values) or is_float_dtype(values) or
is_integer_dtype(values) or is_bool_dtype(values)):
return ~np.isfinite(values)
return ~np.isfinite(values.astype('float64'))
def _na_ok_dtype(dtype):
return not is_int_or_datetime_dtype(dtype)
def _view_if_needed(values):
if is_datetime_or_timedelta_dtype(values):
return values.view(np.int64)
return values
def _wrap_results(result, dtype):
""" wrap our results if needed """
if is_datetime64_dtype(dtype):
if not isinstance(result, np.ndarray):
result = lib.Timestamp(result)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = lib.Timedelta(result, unit='ns')
else:
result = result.astype('i8').view(dtype)
return result
def nanany(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna)
return values.any(axis)
def nanall(values, axis=None, skipna=True):
values, mask, dtype, _ = _get_values(values, skipna, True, copy=skipna)
return values.all(axis)
@disallow('M8')
@bottleneck_switch(zero_value=0)
def nansum(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask)
return _wrap_results(the_sum, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna, 0)
dtype_sum = dtype_max
dtype_count = np.float64
if is_integer_dtype(dtype) or is_timedelta64_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, 'ndim', False):
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow('M8')
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(values, skipna)
def get_median(x):
mask = notnull(x)
if not skipna and not mask.all():
return np.nan
return algos.median(_values_from_object(x[mask]))
if not is_float_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
return _wrap_results(np.apply_along_axis(get_median, axis, values), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(mask, axis, ddof, dtype=float):
dtype = _get_dtype(dtype)
count = _get_counts(mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if np.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1):
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof))
return _wrap_results(result, values.dtype)
@disallow('M8')
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1):
dtype = values.dtype
mask = isnull(values)
if is_any_int_dtype(values):
values = values.astype('f8')
values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(mask, axis, ddof)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
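    # In effect this evaluates var = sum((x - mean(x)) ** 2) / (count - ddof),
    # with both sums accumulated in float64.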
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return _wrap_results(result, values.dtype)
@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype)
var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch()
def reduction(values, axis=None, skipna=True):
values, mask, dtype, dtype_max = _get_values(
values,
skipna,
fill_value_typ=fill_value_typ,
)
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except:
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _wrap_results(result, dtype)
return _maybe_null_out(result, axis, mask)
reduction.__name__ = 'nan' + meth
return reduction
nanmin = _nanminmax('min', fill_value_typ='+inf')
nanmax = _nanminmax('max', fill_value_typ='-inf')
def nanargmax(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='-inf',
isfinite=True)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
def nanargmin(values, axis=None, skipna=True):
"""
Returns -1 in the NA case
"""
values, mask, dtype, _ = _get_values(values, skipna, fill_value_typ='+inf',
isfinite=True)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow('M8', 'm8')
def nanskew(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
# floating point error
B = _zero_out_fperr(B)
C = _zero_out_fperr(C)
result = ((np.sqrt(count * count - count) * C) /
((count - typ(2)) * np.sqrt(B) ** typ(3)))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if B == 0 else result
if count < 3:
return np.nan
return result
@disallow('M8', 'm8')
def nankurt(values, axis=None, skipna=True):
mask = isnull(values)
if not is_float_dtype(values.dtype):
values = values.astype('f8')
count = _get_counts(mask, axis)
else:
count = _get_counts(mask, axis, dtype=values.dtype)
if skipna:
values = values.copy()
np.putmask(values, mask, 0)
typ = values.dtype.type
A = values.sum(axis) / count
B = (values ** 2).sum(axis) / count - A ** typ(2)
C = (values ** 3).sum(axis) / count - A ** typ(3) - typ(3) * A * B
D = (values ** 4).sum(axis) / count - A ** typ(4) - typ(6) * B * A * A - typ(4) * C * A
B = _zero_out_fperr(B)
D = _zero_out_fperr(D)
if not isinstance(B, np.ndarray):
# if B is a scalar, check these corner cases first before doing division
if count < 4:
return np.nan
if B == 0:
return 0
result = (((count * count - typ(1)) * D / (B * B) - typ(3) * ((count - typ(1)) ** typ(2))) /
((count - typ(2)) * (count - typ(3))))
if isinstance(result, np.ndarray):
result = np.where(B == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow('M8', 'm8')
def nanprod(values, axis=None, skipna=True):
mask = isnull(values)
if skipna and not is_any_int_dtype(values):
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask)
def _maybe_arg_null_out(result, axis, mask, skipna):
# helper function for nanargmin/nanargmax
if axis is None or not getattr(result, 'ndim', False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(mask, axis, dtype=float):
dtype = _get_dtype(dtype)
if axis is None:
return dtype.type(mask.size - mask.sum())
count = mask.shape[axis] - mask.sum(axis)
if np.isscalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(result, axis, mask):
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis)) == 0
if np.any(null_mask):
if np.iscomplexobj(result):
result = result.astype('c16')
else:
result = result.astype('f8')
result[null_mask] = np.nan
elif result is not tslib.NaT:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
return result
def _zero_out_fperr(arg):
if isinstance(arg, np.ndarray):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow('M8', 'm8')
def nancorr(a, b, method='pearson', min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError('Operands to nancorr must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ['kendall', 'spearman']:
from scipy.stats import kendalltau, spearmanr
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {
'pearson': _pearson,
'kendall': _kendall,
'spearman': _spearman
}
return _cor_methods[method]
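# Illustrative usage (a sketch; 'kendall'/'spearman' assume scipy is available):
#   corr = get_corr_func('spearman')(np.array([1., 2., 3.]), np.array([2., 4., 6.]))
# returns the chosen correlation of two already NA-free arrays.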
@disallow('M8', 'm8')
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError('Operands to nancov must have same size')
if min_periods is None:
min_periods = 1
valid = notnull(a) & notnull(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except:
x = x.astype(np.float64)
else:
if not np.any(x.imag):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError('Could not convert %s to numeric' % str(x))
return x
# NA-friendly array comparisons
import operator
def make_nancomp(op):
def f(x, y):
xmask = isnull(x)
ymask = isnull(y)
mask = xmask | ymask
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype('O')
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
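# Illustrative behaviour (editorial sketch, not from the original module): the
# wrappers propagate missing values through elementwise comparisons, e.g.
#   naneq(np.array([1.0, np.nan]), np.array([1.0, 2.0]))
#   # -> array([True, nan], dtype=object)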
def unique1d(values):
"""
Hash table-based unique
"""
if np.issubdtype(values.dtype, np.floating):
table = _hash.Float64HashTable(len(values))
uniques = np.array(table.unique(_ensure_float64(values)),
dtype=np.float64)
elif np.issubdtype(values.dtype, np.datetime64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('M8[ns]')
elif np.issubdtype(values.dtype, np.timedelta64):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
uniques = uniques.view('m8[ns]')
elif np.issubdtype(values.dtype, np.integer):
table = _hash.Int64HashTable(len(values))
uniques = table.unique(_ensure_int64(values))
else:
table = _hash.PyObjectHashTable(len(values))
uniques = table.unique(_ensure_object(values))
return uniques
| mit |
CivilNet/Gemfield | dockerfiles/py-faster-rcnn/files/gemfield/py-faster-rcnn/caffe-fast-rcnn/examples/finetune_flickr_style/assemble_data.py | 38 | 3636 | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
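# Illustrative call (hypothetical URL and path): each worker receives a single
# (url, filename) tuple so the function can be used with Pool.map, e.g.
#   download_image(('https://example.com/style.jpg', '/tmp/style.jpg'))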
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
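# Typical invocation (a sketch; the flag values are arbitrary, the flag names
# come from the argument parser above):
#
#     python examples/finetune_flickr_style/assemble_data.py \
#         --workers=-1 --seed=1701 --images=2000 --labels=5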
| gpl-3.0 |
hainm/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
| bsd-3-clause |
davidgardenier/frbpoppy | tests/rates/flattening_rates.py | 1 | 8550 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from copy import deepcopy
from frbpoppy import CosmicPopulation, SurveyPopulation, Survey
from frbpoppy import poisson_interval
from tests.convenience import plot_aa_style, rel_path
ACTUAL_RATES = False
SURVEYS = ['askap-fly', 'fast-crafts', 'chime-frb', 'wsrt-apertif']
SIZE = 1e5
class Rates:
def __init__(self, size, survey_names, rates=None):
self.size = size
self.survey_names = survey_names
self.surveys = [self.gen_survey(s) for s in survey_names]
self.data = []
self.rates = rates
self.zs = [0.01, 2, 6]
# Generate populations
self.gen_def_pop()
self.gen_cos_pops()
self.gen_lum_pops()
self.gen_si_pops()
self.gen_w_pops()
self.df = pd.DataFrame(self.data)
self.plot()
def loop_surveys(self, cosmic_pop, z_max):
for survey in self.surveys:
surv_pop = SurveyPopulation(cosmic_pop, survey)
surv_pop.z_max = z_max
rate = surv_pop.source_rate.det
n = surv_pop.n_sources()
errs = [np.abs(e-n) for e in poisson_interval(n)]
d = {'survey': survey.name,
'pop': cosmic_pop.name,
'rate': rate,
'err_low': errs[0],
'err_high': errs[1],
'z_max': z_max}
self.data.append(d)
def gen_def_pop(self):
self.default_pop = CosmicPopulation.simple(self.size)
self.default_pop.set_lum(model='constant', value=1e43)
self.default_pop.generate()
def gen_survey(self, name):
survey = Survey(name)
survey.set_beam('gaussian')
return survey
def gen_cos_pops(self):
cosmo_pop = deepcopy(self.default_pop)
for z in self.zs:
cosmo_pop.set_dist(z_max=z)
cosmo_pop.name = r'z$_{\text{max}}$=' + str(z)
cosmo_pop.generate()
self.loop_surveys(cosmo_pop, z)
def gen_lum_pops(self):
lum_pop = deepcopy(self.default_pop)
for z in [0.01, 6]:
# Standard candles
lum_pop.set_lum(model='constant', value=1e40)
lum_pop.generate()
lum_pop.name = f'std candle'
self.loop_surveys(lum_pop, z)
# Powerlaw with slope
power = -1
lum_pop.set_lum(model='powerlaw', low=1e40, high=1e43, power=power)
lum_pop.generate()
lum_pop.name = f'li={power}'
self.loop_surveys(lum_pop, z)
# Powerlaw with slope
power = -2
lum_pop.set_lum(model='powerlaw', low=1e40, high=1e43, power=power)
lum_pop.generate()
lum_pop.name = f'li={power}'
self.loop_surveys(lum_pop, z)
def gen_si_pops(self):
si_pop = deepcopy(self.default_pop)
for z in [0.01, 6]:
si_pop.set_dist(z_max=z)
for si in [-2, 0, 2]:
si_pop.set_si(model='constant', value=si)
si_pop.name = f'si={si}'
si_pop.generate()
self.loop_surveys(si_pop, z)
def gen_w_pops(self):
w_pop = deepcopy(self.default_pop)
for z in [0.01, 6]:
w_pop.set_dist(z_max=z)
# Constant
w_pop.set_w(model='constant', value=10)
w_pop.generate()
w_pop.name = f'constant'
self.loop_surveys(w_pop, z)
# Normal
w_pop.set_w(model='gauss', mean=10, std=10)
w_pop.generate()
w_pop.name = f'normal'
self.loop_surveys(w_pop, z)
# Lognormal
w_pop.set_w(model='lognormal', mean=10, std=10)
w_pop.generate()
w_pop.name = f'lognormal'
self.loop_surveys(w_pop, z)
def plot(self):
plot_aa_style()
plt.rcParams["figure.figsize"] = (5.75373, 5.75373)
f, self.axes = plt.subplots(2, 2, sharex='col', sharey='row')
self.linestyles = ['solid', 'dashed', 'dotted']
self.colours = plt.rcParams['axes.prop_cycle'].by_key()['color']
# Matching redshifts to linestyles
self.zs = self.df.z_max.unique()
self.lz = dict(zip(self.zs, self.linestyles))
# Set vertical spacing
self.internal_survey_spacing = 0.1
self.external_survey_spacing = 0.2
for ax in self.axes.flat:
ax.set_xscale('log')
ax.set_xlim(1e-6, 1e6)
self.plot_cosmo()
self.plot_lum()
self.plot_si()
self.plot_w()
plt.tight_layout()
p = f'plots/flattening_rates.pdf'
plt.savefig(rel_path(p))
def plot_rate(self, ax, df, subplot=None):
y = 0
label_ys = []
surveys = df.survey.unique()
norm_rate = df.iloc[0].rate
if subplot == 'cosmo':
y -= 1.5*self.internal_survey_spacing
for survey, group in df.groupby('survey', sort=False):
y_min = y
color_ix = 0
for pop, group in group.groupby('pop', sort=False):
# Set colour per population type
colour = self.colours[color_ix]
if subplot == 'cosmo':
colour = 'grey'
color_ix += 1
ls_ix = 0
for _, row in group.iterrows():
# Set linestyle per population
ls = self.lz[row.z_max]
ls_ix += 1
label = pop
# Set up legend for just the first set
if subplot == 'cosmo':
if survey != surveys[0]:
label = f'_{label}'
else:
if ls_ix > 1 or survey != surveys[0]:
label = f'_{label}'
# Plot errorbars
errs = [row.err_low, row.err_high]
line = ax.errorbar(row.rate/norm_rate, y,
xerr=np.array([errs]).T,
fmt='x',
color=colour,
label=rf'{label}')
line[-1][0].set_linestyle(ls)
# Shift vertical position to next line
y -= self.internal_survey_spacing
# Show survey interval
# overflow = 0.25*self.external_survey_spacing
# y_max = y + self.internal_survey_spacing
# ax.plot([ax.get_xlim()[0]]*2,
# [y_min+overflow, y_max-overflow],
# color='k', lw=4)
if ACTUAL_RATES: # Add line with actual rate
ax.plot([self.rates[survey]]*2,
[y_min, y + self.internal_survey_spacing],
color='grey')
# Calculate coordinates for y axis
y_width = [y_min, y+self.internal_survey_spacing]
label_ys.append(np.mean(y_width))
y -= self.external_survey_spacing
if subplot == 'cosmo':
y -= 3*self.internal_survey_spacing
ax.set_yticks(label_ys)
ax.set_yticklabels(surveys)
ax.legend(prop={'size': 8}, markerscale=0)
def plot_cosmo(self):
ax = self.axes[0, 0]
ax.set_title('Cosmology')
filtered_df = self.df[(self.df['pop'].str.startswith('z$_'))]
self.plot_rate(ax, filtered_df, subplot='cosmo')
ax.invert_yaxis()
def plot_lum(self):
ax = self.axes[0, 1]
ax.set_title('Luminosity')
p = self.df['pop']
filtered_df = self.df[((p == 'std candle') | (p.str.startswith('li')))]
self.plot_rate(ax, filtered_df)
ax.invert_yaxis()
def plot_si(self):
ax = self.axes[1, 0]
ax.set_xlabel(r'Rate (day$^{-1}$)')
ax.set_title('Spectral index')
self.plot_rate(ax, self.df[(self.df['pop'].str.startswith('si'))])
ax.invert_yaxis()
def plot_w(self):
ax = self.axes[1, 1]
ax.set_xlabel(r'Rate (day$^{-1}$)')
ax.set_title('Pulse width')
keep_list = ['constant', 'lognormal', 'normal']
self.plot_rate(ax, self.df[self.df['pop'].isin(keep_list)])
ax.invert_yaxis()
if __name__ == '__main__':
rates = {'wsrt-apertif': 1, 'parkes-htru': 2, 'fast-crafts': 5, 'askap': 10}
Rates(SIZE, SURVEYS, rates)
| mit |
kkozarev/mwacme | MS_Inspect_menu_version/Cloamp_Panel.py | 1 | 11277 | ###########################################################################
############################ Cloamp_Panel #################################
###########################################################################
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import time
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, \
NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.patches as patches
from amplitude_closures import calc_quadrangles, calc_amplitude_closures
import sys
if sys.version_info[0] < 3:
import Tkinter as tk
import ttk
import tkFileDialog
import tkMessageBox
else: # Python3
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import Graph_Panel
from Graph_Panel import Graph_Panel
class Cloamp_Panel(Graph_Panel):
#
# Externally defined methods
#
calculate_quadrangles = calc_quadrangles
calculate_closures = calc_amplitude_closures
def __init__(self, parent, momPanel):
Graph_Panel.__init__(self, parent, momPanel)
self.panelName = 'Cloamp_Panel'
#
# Flagging menubutton
#
############################self.menuBtFlagging.pack(side="left")
#
# Buttons "Select Points", "Histogram", and "Show Tip"
#
#self.buttonSelectPoints.pack(side="left")
self.menuBtSelect.pack(side="left")
self.buttonUndo.pack(side="left")
self.buttonRedo.pack(side="left")
self.menuBtFlagging.pack(side="left")
self.buttonHistogram.pack(side="left")
self.buttonShowTip.pack(side="left")
#
# Compute quadrangle abscissae and other metrics:
#
# self.area
# self.maxbl
# self.longshort
# self.ants
# self.bls
# self.bllen
# self.apairs
#
# This function is called only once
#
self.calculate_quadrangles()
#
# Compute amplitude closures and quadrangle products used for it
# for all the keys in visibility dictionary:
#
# self.closures
# self.prod
#
self.prod = dict()
self.closures = dict()
self.calculate_closures(self.vis_orig_keys)
#
# Plot and save the list of plotted lines in plis
#
self.Ykey = 'original'
self.Xkey = 'area'
seterr_state = np.seterr()
np.seterr(divide='ignore', invalid='ignore')
self.xdata = {'lin':{'area':self.area, \
'maxbl':self.maxbl, \
'longshort':self.longshort}, \
'log10':{'area':np.log10(self.area), \
'maxbl':np.log10(self.maxbl), \
'longshort':np.log10(self.longshort)}
}
self.xlabel = {'lin':{'area':'Quadrangle Area (m^2)', \
'maxbl':'Max Baseline (m)', \
'longshort':'Longest/Shortest Baseline'}, \
'log10':{'area':'log10( Quadrangle Area (m^2) )', \
'maxbl':'log10( Max Baseline (m) )', \
'longshort':'log10( Longest/Shortest Baseline )'}
}
self.ydata = {'lin':{}, 'log10':{}}
        for k in self.closures:
self.ydata['lin'][k] = self.closures[k]
self.ydata['log10'][k] = np.log10(self.closures[k])
np.seterr(**seterr_state) # Revert to the former
self.ylabel = {'lin':{}, 'log10':{}}
self.ylabel_log10 = 'log10( Closure Amplitude %s )'
self.ylabel_lin = 'Closure Amplitude %s'
        for k in self.closures:
self.ylabel['lin'][k] = self.ylabel_lin % \
' '.join(k.split('_')).title()
self.ylabel['log10'][k] = self.ylabel_log10 % \
' '.join(k.split('_')).title()
self.title = 'Amplitude Closures %s vs Closure Areas'
xdata = self.xdata[self.Xscale][self.Xkey]
ydata = self.ydata[self.Yscale][self.Ykey]
xpoint = xdata[self.picked_ind] if self.picked_point else None
ypoint = ydata[self.picked_ind] if self.picked_point else None
xlabel = self.xlabel[self.Xscale][self.Xkey]
ylabel = self.ylabel[self.Yscale][self.Ykey]
title = self.title % ' '.join(self.Ykey.split('_')).title()
#
# Find indices of the amplitude closures containing at least one
# flagged visibility from TileRxPanel::iflag
#
self.iflag = self.flags_for_closures()
iflag = self.iflag
self.plis = self.ax.plot(xdata, ydata, 'b.', ms=4.0, mew=0.1, picker=3)
# Paint gray the flagged visibility points
        self.plis += self.ax.plot(xdata[iflag], ydata[iflag], marker='.', \
                                  ms=4.0, mew=0.1, mfc='#8f8f8f', ls='')
self.ax.grid(1)
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.ax.set_title(title)
self.canvas.show()
def save_selection(self, sel):
"""
sel: array (or any other sequence) of the selected points numbers
"""
if len(sel) == 0: return # ============ NO SELECTIONS ============= >>>
if len(self.sel) == 0: return # ======= NO SELECTIONS ============= >>>
bls = self.bls
ants = self.ants
apairs = self.apairs
#
# Use local time stamp stim in the output file name fout_name
#
stim = time.strftime("%Y%m%d_%H%M%S", time.localtime())
fout_name = self.momPanel.insp_name + '/' + \
'selected_amplitude_closures_' + stim + '.txt'
#
# Print the selected points on screen and to file
# "selected_points.txt"
#
ntsel = len(self.sel)
self.nsel = ntsel
        print('Total amplitude closures selected: %d' % ntsel)
fout = open(fout_name, "w")
x = self.xdata[self.Xscale][self.Xkey]
y = self.ydata[self.Yscale][self.Ykey]
Xname = self.Xkey2name[self.Xkey]
if self.Xscale == 'log10':
hdrx = 'log10(x)'
else:
hdrx = ' x '
if self.Yscale == 'log10':
            hdry = 'log10(y)'
else:
hdry = ' y '
hedr = '# Amplitude Closures versus %s\n' \
'# N %s %s antennas ' \
' baselines as antenna pairs ' \
'baselines receivers' % (Xname, hdrx, hdry)
fout.write(hedr+'\n')
        print(hedr)
        for i in range(ntsel):
j = self.sel[i]
bl = bls[j,:]
an = ants[j,:]
ap = apairs[j,:,:] # Antenna pairs that make up the closure
line = '%5d %11g %11g %3d %3d %3d %3d ' \
'(%3d,%3d) (%3d,%3d) (%3d,%3d) (%3d,%3d) ' \
'%4d %4d %4d %4d %2d %2d %2d %2d'% \
((j, x[j], y[j], an[0], an[1], an[2], an[3]) + \
tuple(ap.ravel()) + \
(bl[0], bl[1], bl[2], bl[3], \
an[0]//8, an[1]//8, an[2]//8, an[3]//8))
            print(line)
fout.write(line+'\n')
fout.close()
        print('%d selected points are saved in file "%s"' % (ntsel, fout_name))
def green_picked_and_draw_closure(self, ind):
momPanel = self.momPanel # Tlrx panel
tl = momPanel.tl # Array of tiles' coordinates
anc = np.zeros(5, dtype=int) # Closed quadrangle
anc[:4] = self.ants[ind,:]
anc[4] = anc[0]
        an = self.ants[ind,:]
ap = self.apairs[ind,:,:]
bll = self.bllen[ind,:] # bl lengths
# Highlight/dim the clicked point
xpoint = self.xdata[self.Xscale][self.Xkey][ind]
ypoint = self.ydata[self.Yscale][self.Ykey][ind]
## if self.log10XScale:
## xpoint = self.xdata_log[ind]
## else:
## xpoint = self.xdata[ind]
## if self.log10YScale:
## if self.dividedByModel:
## ypoint = self.ydata_dm_log[ind]
## else:
## ypoint = self.ydata_log[ind]
## else:
## if self.dividedByModel:
## ypoint = self.ydata_dm[ind]
## else:
## ypoint = self.ydata[ind]
if self.picked_point:
self.picked_point.remove()
self.picked_point = None
else:
pp = self.ax.plot(xpoint, ypoint, 'g.', ms=8.0, mew=0.1)
self.picked_point = pp[0]
# Pop/hide framed text
self.tip.showtip('Ant: [%d, %d, %d, %d]\nRx: [%d, %d, %d, %d]'%\
(tuple(an) + tuple(an//8+1)))
# Blacken/deblacken the tile and receiver
if momPanel.clostiles:
momPanel.clostiles.remove()
momPanel.clostiles = None
if isinstance(momPanel.closline, list):
for line in momPanel.closline:
line.remove()
else:
momPanel.closline.remove()
momPanel.closline = None
            for i in range(4):
tx = momPanel.closannot[i]
tx.remove()
momPanel.closannot = None
else:
# Highlight the tiles making the quadrangle
clostiles = momPanel.ax.plot(tl[an,0], tl[an,1], 'rs')
momPanel.clostiles = clostiles[0]
# Draw the closure quadrangle
tlx = tl[anc,0]
tly = tl[anc,1]
closcol = ['brown', 'brown', 'brown', 'brown']
closcol[bll.argmin()] = 'blue'
closcol[bll.argmax()] = 'red'
closvert = ('A', 'B', 'C', 'D')
dashes = ([7,3], [7,3], [7,3,2,3], [7,3,2,3])
closline = []
            for i in range(4):
closline += momPanel.ax.plot( \
tl[ap[i,:],0], tl[ap[i,:],1], closcol[i], \
dashes=dashes[i], lw=2.)
#print 'bll[%d]=%g, col[%d] = %s ' % \
# (i, bll[i], i, closcol[i])
momPanel.closline = closline
# Print antenna numbers near the highlighted tiles
momPanel.closannot = []
            for i in range(4):
momPanel.closannot.append( \
momPanel.ax.text(tlx[i]-140, tly[i]+10, \
'(%c) Ant %d' % (closvert[i], an[i])))
momPanel.canvas.show()
def update_selclo(self):
selquad = self.ants[self.sel,:] # Selected antenna quads
return selquad
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e348.py | 2 | 6288 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
output_central_value=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': rectify
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': rectify
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 3,
'stride': 1,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
h2educ/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
        extra : string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
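# The decorator also handles classes; a minimal sketch (the class names below
# are made up for illustration):
#
#     @deprecated("use NewEstimator instead")
#     class OldEstimator(object):
#         pass
#
#     OldEstimator()  # warns: "Class OldEstimator is deprecated; use NewEstimator instead"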
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
    mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
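# Illustrative sketch of the two code paths above (the inputs are made up):
#
#     import numpy as np
#     from scipy.sparse import csr_matrix
#
#     mask = np.array([True, False, True])
#     safe_mask(np.zeros((3, 2)), mask)      # dense X: boolean mask returned as-is
#     safe_mask(csr_matrix((3, 2)), mask)    # sparse X: converted to indices array([0, 2])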
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
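# Illustrative sketch (the inputs are made up):
#
#     import numpy as np
#     import pandas as pd
#
#     safe_indexing(np.array([[1, 2], [3, 4], [5, 6]]), np.array([2, 0]))  # rows 2 and 0 via take()
#     safe_indexing(pd.Series(['a', 'b', 'c']), [1, 2])                    # positional .iloc indexing
#     safe_indexing(['a', 'b', 'c'], [1, 2])                               # plain list -> ['b', 'c']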
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
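# Illustrative sketch (the values are made up):
#
#     import numpy as np
#     from scipy.sparse import csr_matrix
#
#     safe_sqr(np.array([1, 2, 3]))            # -> array([1, 4, 9])
#     safe_sqr(csr_matrix(np.eye(2) * 2))      # sparse in, sparse out; only .data is squared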
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
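# The n_samples argument simply truncates the final slices, e.g. (sketch):
#
#     list(gen_even_slices(10, 3, n_samples=8))
#     # -> [slice(0, 4, None), slice(4, 7, None), slice(7, 8, None)]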
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
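# Illustrative sketch:
#
#     tosequence(np.arange(3))       # ndarray -> returned via np.asarray (no copy)
#     tosequence("abc")              # str is already a Sequence -> returned as-is
#     tosequence(iter([1, 2, 3]))    # other iterables -> list([1, 2, 3])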
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/scalar/timedelta/test_arithmetic.py | 1 | 33663 | """
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, _is_numpy_dev, compat, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
def test_td_add_timestamp_overflow(self):
msg = "int too (large|big) to convert"
with pytest.raises(OverflowError, match=msg):
Timestamp("1700-01-01") + Timedelta(13 * 19999, unit="D")
with pytest.raises(OverflowError, match=msg):
Timestamp("1700-01-01") + timedelta(days=13 * 19999)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
msg = "unsupported operand type"
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError, match=msg):
td + other
with pytest.raises(TypeError, match=msg):
other + td
with pytest.raises(TypeError, match=msg):
td - other
with pytest.raises(TypeError, match=msg):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types "
r"dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"])
msg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'",
r"ufunc '?multiply'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError, match=msg):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, "s")
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, "ns")
assert result == td.value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
result = td // np.timedelta64(1, "D")
assert result == 1
result = td // np.timedelta64(1, "s")
assert result == 93600
result = td // np.timedelta64(1, "ns")
assert result == td.value
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize(
"nan",
[
np.nan,
pytest.param(
np.float64("NaN"),
marks=pytest.mark.xfail(
# Works on numpy dev only in python 3.9
_is_numpy_dev and not compat.PY39,
raises=RuntimeWarning,
reason="https://github.com/pandas-dev/pandas/issues/31992",
),
),
float("nan"),
],
)
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
def test_td_rdiv_na_scalar(self):
# GH#31869 None gets cast to NaT
td = Timedelta(10, unit="d")
result = NaT / td
assert np.isnan(result)
result = None / td
assert np.isnan(result)
result = np.timedelta64("NaT") / td
assert np.isnan(result)
msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.datetime64("NaT") / td
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.nan / td
def test_td_rdiv_ndarray(self):
td = Timedelta(10, unit="d")
arr = np.array([td], dtype=object)
result = arr / td
expected = np.array([1], dtype=np.float64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([None])
result = arr / td
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
arr = np.array([np.nan], dtype=object)
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
arr / td
arr = np.array([np.nan], dtype=np.float64)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
arr / td
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
msg = "|".join(
[
r"Invalid dtype datetime64\[D\] for __floordiv__",
"'dtype' is an invalid keyword argument for this function",
r"ufunc '?floor_divide'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
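        # For example, ``2 // td`` first calls ``int.__floordiv__``, which
        # returns NotImplemented, then ``td.__rfloordiv__(2)``, which also
        # returns NotImplemented (see test_td_rfloordiv_numeric_scalar below),
        # so the overall expression raises TypeError.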
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
assert td.__rfloordiv__(dt64) is NotImplemented
msg = (
r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'"
)
with pytest.raises(TypeError, match=msg):
dt64 // td
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented
assert td.__rfloordiv__(np.uint8(9)) is NotImplemented
assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented
msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta"
with pytest.raises(TypeError, match=msg):
np.float64(2.0) // td
with pytest.raises(TypeError, match=msg):
np.uint8(9) // td
with pytest.raises(TypeError, match=msg):
# deprecated GH#19761, enforced GH#29797
np.int32(2.0) // td
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_intarray(self):
# deprecated GH#19761, enforced GH#29797
ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10 ** 9
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
ints // Timedelta(1, unit="s")
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
# Deprecated GH#19761, enforced GH#29797
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError, match=msg):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand"
with pytest.raises(TypeError, match=msg):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError, match=msg):
15 % td
with pytest.raises(TypeError, match=msg):
16.0 % td
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError, match=msg):
divmod(15, td)
with pytest.raises(TypeError, match=msg):
divmod(16.0, td)
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]),
np.array([Timestamp.now(), Timedelta("1D")]),
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
msg = "unsupported operand type|cannot use operands with types"
with pytest.raises(TypeError, match=msg):
op(arr, Timedelta("1D"))
class TestTimedeltaComparison:
def test_compare_tick(self, tick_classes):
cls = tick_classes
off = cls(4)
td = off.delta
assert isinstance(td, Timedelta)
assert td == off
assert not td != off
assert td <= off
assert td >= off
assert not td < off
assert not td > off
assert not td == 2 * off
assert td != 2 * off
assert td <= 2 * off
assert td < 2 * off
assert not td >= 2 * off
assert not td > 2 * off
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta("2 days")
other = Timedelta("3 hours")
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td], [td, other]], dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
def test_compare_timedelta_ndarray(self):
# GH#11835
periods = [Timedelta("0 days 01:00:00"), Timedelta("0 days 01:00:00")]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_compare_td64_ndarray(self):
        # GH#33441
arr = np.arange(5).astype("timedelta64[ns]")
td = pd.Timedelta(arr[1])
expected = np.array([False, True, False, False, False], dtype=bool)
result = td == arr
tm.assert_numpy_array_equal(result, expected)
result = arr == td
tm.assert_numpy_array_equal(result, expected)
result = td != arr
tm.assert_numpy_array_equal(result, ~expected)
result = arr != td
tm.assert_numpy_array_equal(result, ~expected)
@pytest.mark.skip(reason="GH#20829 is reverted until after 0.24.0")
def test_compare_custom_object(self):
"""
        Make sure non-supported operations on Timedelta return NotImplemented
        and yield to the other operand (GH#20829).
"""
class CustomClass:
def __init__(self, cmp_result=None):
self.cmp_result = cmp_result
def generic_result(self):
if self.cmp_result is None:
return NotImplemented
else:
return self.cmp_result
def __eq__(self, other):
return self.generic_result()
def __gt__(self, other):
return self.generic_result()
t = Timedelta("1s")
assert not (t == "string")
assert not (t == 1)
assert not (t == CustomClass())
assert not (t == CustomClass(cmp_result=False))
assert t < CustomClass(cmp_result=True)
assert not (t < CustomClass(cmp_result=False))
assert t == CustomClass(cmp_result=True)
@pytest.mark.parametrize("val", ["string", 1])
def test_compare_unknown_type(self, val):
# GH#20829
t = Timedelta("1s")
msg = "not supported between instances of 'Timedelta' and '(int|str)'"
with pytest.raises(TypeError, match=msg):
t >= val
with pytest.raises(TypeError, match=msg):
t > val
with pytest.raises(TypeError, match=msg):
t <= val
with pytest.raises(TypeError, match=msg):
t < val
def test_ops_notimplemented():
class Other:
pass
other = Other()
td = Timedelta("1 day")
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str():
# GH#13624
td = Timedelta("1 day")
for left, right in [(td, "a"), ("a", td)]:
msg = "|".join(
[
"unsupported operand type",
r'can only concatenate str \(not "Timedelta"\) to str',
"must be str, not Timedelta",
]
)
with pytest.raises(TypeError, match=msg):
left + right
msg = "not supported between instances of"
with pytest.raises(TypeError, match=msg):
left > right
assert not left == right
assert left != right
| bsd-3-clause |
NickleDave/hybrid-vocal-classifier | src/hvc/parse/extract.py | 1 | 23527 | """
YAML parser for extract config files
"""
import os
import csv
import copy
import warnings
import yaml
from .ref_spect_params import refs_dict
from .utils import check_for_missing_keys, flatten
path = os.path.abspath(__file__) # get the path of this file
dir_path = os.path.dirname(path) # but then just take the dir
with open(os.path.join(dir_path, "features.yml")) as features_yml:
VALID_FEATURES = yaml.load(features_yml, Loader=yaml.FullLoader)["features"]
with open(os.path.join(dir_path, "validation.yml")) as val_yaml:
validate_dict = yaml.load(val_yaml, Loader=yaml.FullLoader)
# feature groups in separate file from feature list because
# want to validate feature groups against feature list
# and some features are not in feature group
with open(os.path.join(dir_path, "feature_groups.yml")) as ftr_grp_yaml:
valid_feature_groups_dict = yaml.load(ftr_grp_yaml, Loader=yaml.FullLoader)
REQUIRED_TODO_LIST_KEYS = set(validate_dict["required_extract_todo_list_keys"])
REQUIRED_TODO_LIST_KEYS_FLATTENED = set(
flatten(validate_dict["required_extract_todo_list_keys"])
)
OPTIONAL_TODO_LIST_KEYS = set(validate_dict["optional_extract_todo_list_keys"])
################################################################
# validation functions for individual configuration parameters #
################################################################
valid_spect_param_keys = {
"nperseg",
"noverlap",
"freq_cutoffs",
"window",
"filter_func",
"spect_func",
"ref",
"log_transform_spect",
}
def validate_spect_params(spect_params):
"""validates spect_params
Parameters
----------
spect_params : dict
with keys as specified in extract YAML spec
also are the arguments to Spectrogram.__init__
nperseg : int
            number of samples per segment for FFT, e.g. 512
noverlap : int
number of overlapping samples in each segment
freq_cutoffs : two-element list of integers
limits of frequency band to keep, e.g. [1000,8000]
Spectrogram.make keeps the band:
freq_cutoffs[0] >= spectrogram > freq_cutoffs[1]
window : str
window to apply to segments
valid strings are 'Hann', 'dpss', None
            Hann -- Uses np.hanning with parameter M (window width) set to value of nperseg
dpss -- Discrete prolate spheroidal sequence AKA Slepian.
Uses scipy.signal.slepian with M parameter equal to nperseg and
width parameter equal to 4/nperseg, as in [2]_.
filter_func : str
filter to apply to raw audio. valid strings are 'diff' or None
'diff' -- differential filter, literally np.diff applied to signal as in [1]_.
'filt_song' -- filter used by evsonganaly.m with .cbin files recorded by evTAF
bandpass filter applied by filtfilt function
None -- no filter, this is the default
spect_func : str
which function to use for spectrogram.
valid strings are 'scipy' or 'mpl'.
'scipy' uses scipy.signal.spectrogram,
'mpl' uses matplotlib.matlab.specgram.
Default is 'scipy'.
ref : str
{'tachibana','koumura'}
Use spectrogram parameters from a reference.
'tachibana' uses spectrogram parameters from [1]_,
'koumura' uses spectrogram parameters from [2]_.
log_transform_spect : bool
if True, applies np.log10 to spectrogram to increase range. Default is True.
Returns
-------
spect_params
"""
if type(spect_params) != dict:
raise TypeError(
"value for key 'spect_params' in config file did "
"not parse as a dictionary of parameters, "
"it parsed as {}. Check file formatting.".format(spect_params)
)
if not set(spect_params.keys()) <= valid_spect_param_keys:
invalid_keys = set(spect_params.keys()) - valid_spect_param_keys
raise KeyError(
"unrecognized keys in spect_params dictionary: {}".format(invalid_keys)
)
if "ref" in spect_params:
if spect_params["ref"] not in refs_dict:
raise ValueError(
"Value {} for 'ref' not recognized."
"Valid values are: {}.".format(
spect_params["ref"], list(refs_dict.keys())
)
)
if len(spect_params.keys()) > 1:
warnings.warn(
"spect_params contains 'ref' parameter "
"but also contains other parameters. Defaults "
"for 'ref' will override other parameters."
)
return {"ref": spect_params["ref"]}
else:
return refs_dict[spect_params["ref"]]
if "nperseg" not in spect_params.keys() and "noverlap" not in spect_params.keys():
raise KeyError(
"keys nperseg and noverlap are required in"
"spect_params but were not found."
)
for sp_key, sp_val in spect_params.items():
if sp_key == "nperseg" or sp_key == "noverlap":
if type(sp_val) != int:
raise ValueError(
"{} in spect_params should be an integer".format(sp_key)
)
elif sp_key == "freq_cutoffs":
if len(sp_val) != 2:
raise ValueError("freq_cutoffs should be a 2 item list")
for freq_cutoff in sp_val:
if type(freq_cutoff) != int:
raise ValueError("freq_cutoff {} should be an int".format(sp_val))
elif sp_key == "window":
if sp_val not in {"Hann", "dspp", None}:
raise ValueError(
"{} is invalid value for window in spect params."
"Valid values are: {'Hann', 'dspp', None}".format(sp_val)
)
elif sp_key == "filter_func":
if sp_val not in {"diff", "bandpass_filtfilt", "butter_bandpass", None}:
raise ValueError(
"{} is invalid value for filter_func in spect params."
"Valid values are: {'diff', 'bandpass_filtfilt',"
"'butter_andpass', None}".format(sp_val)
)
elif sp_key == "log_transform_spect":
if type(sp_val) != bool:
raise TypeError(
"log_transform_spect parsed as type {}, "
"but should be bool.".format(type(sp_val))
)
return spect_params
valid_segment_param_keys = {"threshold", "min_syl_dur", "min_silent_dur"}
def validate_segment_params(segment_params):
"""validates segmenting parameters
Parameters
----------
segment_params : dict
with following keys:
threshold : int
amplitudes crossing above this are considered segments
min_syl_dur : float
minimum syllable duration, in seconds
min_silent_dur : float
minimum duration of silent gap between syllables, in seconds
Returns
-------
nothing if parameters are valid
else raises error
"""
if type(segment_params) != dict:
raise TypeError(
"segment_params did not parse as a dictionary, "
"instead it parsed as {}."
" Please check config file formatting.".format(type(val))
)
elif set(segment_params.keys()) != valid_segment_param_keys:
if set(segment_params.keys()) < valid_segment_param_keys:
missing_keys = valid_segment_param_keys - set(segment_params.keys())
raise KeyError("segment_params is missing keys: {}".format(missing_keys))
elif valid_segment_param_keys < set(segment_params.keys()):
            extra_keys = set(segment_params.keys()) - valid_segment_param_keys
            raise KeyError("segment_params has extra keys: {}".format(extra_keys))
else:
invalid_keys = set(segment_params.keys()) - valid_segment_param_keys
raise KeyError("segment_params has invalid keys:".format(invalid_keys))
else:
for key, val in segment_params.items():
if key == "threshold":
if type(val) != int:
raise ValueError(
"threshold should be int but parsed as {}".format(type(val))
)
elif key == "min_syl_dur":
if type(val) != float:
raise ValueError(
"min_syl_dur should be float but parsed as {}".format(type(val))
)
elif key == "min_silent_dur":
if type(val) != float:
raise ValueError(
"min_silent_dur should be float but parsed as {}".format(
type(val)
)
)
def _validate_feature_list(feature_list):
"""helper function to validate feature_list"""
if type(feature_list) != list:
raise ValueError(
"feature_list should be a list but parsed as a {}".format(type(val))
)
else:
for feature in feature_list:
if feature not in VALID_FEATURES:
raise ValueError(
"feature {} not found in valid features".format(feature)
)
def _validate_feature_group_and_convert_to_list(feature_group, feature_list=None):
"""validates feature_group value from todo_list dicts, then converts
to feature_list.
Since todo_list dicts can include both feature_group and feature_list,
this function will accept feature_list from the dict and then append
the feature_group features to those already in the feature_list.
Parameters
----------
feature_group : str or list
currently valid feature groups: {'svm','knn'}
if list, must be a list of strings
feature_list : list
list of features, default is None.
If not None, features from feature groups will be appended to this list.
Returns
-------
feature_list : list
list of features to extract for each feature group.
if feature_list was passed to function along with feature_group, then
features from feature group(s) are appended to end of the feature_list
passed to the function.
    feature_list_group_ID_arr : list
        list of ints of same length as feature_list, where each element is
        the ID of the feature group that the corresponding feature belongs to.
        If feature_list was passed to the function, its features will have
        value None.
    feature_group_ID_dict : dict
        dict where key is a feature group name and value is a corresponding
        ID, an int. Used by hvc.modelselection.select to determine which
        columns in the feature array belong to which feature group.
        If feature_list was passed to the function, it does not affect this
        dict, since the features in that list are not considered part of a
        feature group.
"""
if type(feature_group) != str and type(feature_group) != list:
raise TypeError(
"feature_group parsed as {} but it should be"
" either a string or a list. Please check config"
" file formatting.".format(type(feature_group))
)
# if user entered list with just one element
if type(feature_group) == list and len(feature_group) == 1:
# just take that one element, assuming it is str,
# i.e. name of feature group
feature_group = feature_group[0]
if type(feature_group) == str:
if feature_group not in valid_feature_groups_dict:
raise ValueError(
"{} not found in valid feature groups".format(feature_group)
)
else:
ftr_grp_list = valid_feature_groups_dict[feature_group]
_validate_feature_list(ftr_grp_list) # sanity check
ftr_grp_ID_dict = {feature_group: 0}
feature_list_group_ID = [0] * len(ftr_grp_list)
elif type(feature_group) == list:
# if a list of feature groups
# make feature list that is concatenated feature groups
# and also add 'feature_group_id' vector for indexing to config
ftr_grp_list = []
feature_list_group_ID = []
ftr_grp_ID_dict = {}
for grp_ind, ftr_grp in enumerate(feature_group):
if ftr_grp not in valid_feature_groups_dict:
raise ValueError("{} not found in valid feature groups".format(ftr_grp))
else:
ftr_grp_list.extend(valid_feature_groups_dict[ftr_grp])
feature_list_group_ID.extend(
[grp_ind] * len(valid_feature_groups_dict[ftr_grp])
)
ftr_grp_ID_dict[ftr_grp] = grp_ind
_validate_feature_list(ftr_grp_list)
if feature_list is not None:
not_ftr_grp_features = [None] * len(feature_list)
feature_list_group_ID = not_ftr_grp_features + feature_list_group_ID
return (feature_list + ftr_grp_list, feature_list_group_ID, ftr_grp_ID_dict)
else:
feature_list_group_ID = feature_list_group_ID
return (ftr_grp_list, feature_list_group_ID, ftr_grp_ID_dict)
def _validate_todo_list_dict(todo_list_dict, index, config_path):
"""
validates to-do lists
Parameters
----------
todo_list_dict : dictionary
from "to-do" list
index : int
index of element (i.e., dictionary) in list of dictionaries
config_path : str
absolute path to YAML config file from which dict was taken.
Used to validate directory names.
Returns
-------
validated_todo_list_dict : dictionary
after validation, may have new keys added if necessary
"""
# if required_todo_list_keys is not a subset of todo_list_dict,
# i.e., if not all required keys are in todo_list_dict
missing_keys = check_for_missing_keys(todo_list_dict, REQUIRED_TODO_LIST_KEYS)
if missing_keys:
raise KeyError(
"The following required keys "
"were not found in todo_list item #{}: {}".format(index, missing_keys)
)
else:
additional_keys = set(todo_list_dict.keys()) - REQUIRED_TODO_LIST_KEYS_FLATTENED
for extra_key in additional_keys:
if extra_key not in OPTIONAL_TODO_LIST_KEYS:
raise KeyError(
"key {} in todo_list item #{} is not recognized".format(
extra_key, index
)
)
if "feature_group" not in todo_list_dict and "feature_list" not in todo_list_dict:
raise ValueError(
"todo_list item #{} does not include feature_group or feature_list".format(
index
)
)
    # first make copy of todo_list_dict that can be changed
validated_todo_list_dict = copy.deepcopy(todo_list_dict)
if (
"feature_list" in validated_todo_list_dict
and "feature_group" not in validated_todo_list_dict
):
# if just feature_list, just validate it, don't have to
# do anything else:
_validate_feature_list(validated_todo_list_dict["feature_list"])
elif (
"feature_group" in validated_todo_list_dict
and "feature_list" not in validated_todo_list_dict
):
# if just feature group, convert to feature list then validate
ftr_grp_valid = _validate_feature_group_and_convert_to_list(
validated_todo_list_dict["feature_group"]
)
validated_todo_list_dict["feature_list"] = ftr_grp_valid[0]
validated_todo_list_dict["feature_list_group_ID"] = ftr_grp_valid[1]
validated_todo_list_dict["feature_group_ID_dict"] = ftr_grp_valid[2]
elif (
"feature_list" in validated_todo_list_dict
and "feature_group" in validated_todo_list_dict
):
ftr_grp_valid = _validate_feature_group_and_convert_to_list(
validated_todo_list_dict["feature_group"],
validated_todo_list_dict["feature_list"],
)
validated_todo_list_dict["feature_list"] = ftr_grp_valid[0]
validated_todo_list_dict["feature_list_group_ID"] = ftr_grp_valid[1]
validated_todo_list_dict["feature_group_ID_dict"] = ftr_grp_valid[2]
# okay now that we took care of that we can loop through everything else
for key, val in todo_list_dict.items():
# valid todo_list_dict keys in alphabetical order
if key == "annotation_file":
with open(val, newline="") as f:
reader = csv.reader(f, delimiter=",")
first_row = next(reader)
if first_row != "filename,index,onset,offset,label".split(","):
raise ValueError("annotation_file did not have correct header")
elif key == "bird_ID":
if type(val) != str:
raise ValueError(
"Value {} for key 'bird_ID' is type {} but it"
" should be a string".format(val, type(val))
)
elif key == "data_dirs":
if type(val) != list:
raise ValueError("data_dirs should be a list")
else:
validated_data_dirs = []
for item in val:
if not os.path.isdir(item):
# if item is not absolute path to dir
# try adding item to absolute path to config_file
# i.e. assume it is written relative to config file
item = os.path.join(
os.path.dirname(config_path), os.path.normpath(item)
)
if not os.path.isdir(item):
raise ValueError(
"directory {} in {} is not a valid directory.".format(
item, key
)
)
validated_data_dirs.append(item)
validated_todo_list_dict["data_dirs"] = validated_data_dirs
elif key == "file_format":
if type(val) != str:
raise ValueError(
"Value {} for key 'file_format' is type {} but it"
" should be a string".format(val, type(val))
)
else:
if val not in validate_dict["valid_file_formats"]:
raise ValueError("{} is not a known audio file format".format(val))
elif key == "labels_to_use":
if type(val) != str:
raise ValueError("labels_to_use should be a string, e.g., 'iabcde'.")
else:
validated_todo_list_dict[key] = list(
val
) # convert from string to list of chars
validated_todo_list_dict["labels_to_use_int"] = [
ord(label) for label in list(val)
]
elif key == "output_dir":
if type(val) != str:
raise ValueError(
"output_dirs should be a string but it parsed as a {}".format(
type(val)
)
)
# add 'save_features=True' since this is implied when user
# specifies a directory for output
if "save_features" not in todo_list_dict:
validated_todo_list_dict["save_features"] = True
elif key == "save_features":
if (
"output_dir" in todo_list_dict
and todo_list_dict["save_features"] is False
):
raise ValueError(
"output_dir was specified but " "save_features was set to False"
)
elif key == "segment_params":
validate_segment_params(val)
elif key == "spect_params":
validate_spect_params(val)
return validated_todo_list_dict
##########################################
# main function that validates yaml file #
##########################################
def validate_yaml(config_path, extract_config_yaml):
"""
validates config from extract YAML file
Parameters
----------
config_path : str
absolute path to YAML config file. Used to validate directory names
in YAML files, which are assumed to be written relative to the
location of the file itself.
extract_config_yaml : dict
dict should be config from YAML file as loaded with pyyaml.
Returns
-------
extract_config_dict : dictionary, after validation of all keys
"""
if type(extract_config_yaml) is not dict:
raise ValueError(
"extract_config_yaml passed to parse.extract was "
"not recognized as a dict, instead was a {}."
"Must pass a dict containing config loaded from YAML"
"file or a str that is a YAML filename.".format(type(extract_config_yaml))
)
if "todo_list" not in extract_config_yaml:
raise KeyError("extract config does not have required key 'todo_list'")
if "spect_params" not in extract_config_yaml:
has_spect_params = [
"spect_params" in todo_dict
for todo_dict in extract_config_yaml["todo_list"]
]
if not all(has_spect_params):
raise KeyError(
"no default `spect_params` specified, but"
"not every todo_list in extract config has spect_params"
)
if "segment_params" not in extract_config_yaml:
has_segment_params = [
"segment_params" in todo_dict
for todo_dict in extract_config_yaml["todo_list"]
]
if not all(has_segment_params):
raise KeyError(
"no default `segment_params` specified, but"
"not every todo_list in extract config has segment_params"
)
validated = copy.deepcopy(extract_config_yaml)
for key, val in extract_config_yaml.items():
if key == "spect_params":
validated["spect_params"] = validate_spect_params(val)
elif key == "segment_params":
validate_segment_params(val)
elif key == "todo_list":
if type(val) != list:
raise TypeError(
"todo_list did not parse as a list, instead it parsed as {}."
" Please check config file formatting.".format(type(val))
)
else:
for index, item in enumerate(val):
if type(item) != dict:
raise TypeError(
"item {} in todo_list did not parse as a dictionary, "
"instead it parsed as a {}. Please check config file"
" formatting".format(index, type(item))
)
else:
val[index] = _validate_todo_list_dict(item, index, config_path)
validated["todo_list"] = val # re-assign because feature list is added
else: # if key is not found in list
raise KeyError("key {} in extract is an invalid key".format(key))
return validated
| bsd-3-clause |
silasb/flatcam | descartes/patch.py | 3 | 2290 | """Paths and patches"""
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from numpy import asarray, concatenate, ones
class Polygon(object):
# Adapt Shapely or GeoJSON/geo_interface polygons to a common interface
def __init__(self, context):
if hasattr(context, 'interiors'):
self.context = context
else:
self.context = getattr(context, '__geo_interface__', context)
@property
def geom_type(self):
return (getattr(self.context, 'geom_type', None)
or self.context['type'])
@property
def exterior(self):
return (getattr(self.context, 'exterior', None)
or self.context['coordinates'][0])
@property
def interiors(self):
value = getattr(self.context, 'interiors', None)
if value is None:
value = self.context['coordinates'][1:]
return value
def PolygonPath(polygon):
"""Constructs a compound matplotlib path from a Shapely or GeoJSON-like
geometric object"""
this = Polygon(polygon)
assert this.geom_type == 'Polygon'
def coding(ob):
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(getattr(ob, 'coords', None) or ob)
vals = ones(n, dtype=Path.code_type) * Path.LINETO
vals[0] = Path.MOVETO
return vals
vertices = concatenate(
[asarray(this.exterior)]
+ [asarray(r) for r in this.interiors])
codes = concatenate(
[coding(this.exterior)]
+ [coding(r) for r in this.interiors])
return Path(vertices, codes)
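# Hedged usage sketch for PolygonPath (assumes Shapely is available; the
# geometry below is illustrative): an annulus has one exterior and one
# interior ring, so the resulting Path contains two MOVETO-led subpaths.
#
#     from shapely.geometry import Point
#     annulus = Point(0, 0).buffer(2.0).difference(Point(0, 0).buffer(1.0))
#     path = PolygonPath(annulus)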
def PolygonPatch(polygon, **kwargs):
"""Constructs a matplotlib patch from a geometric object
The `polygon` may be a Shapely or GeoJSON-like object with or without holes.
The `kwargs` are those supported by the matplotlib.patches.Polygon class
constructor. Returns an instance of matplotlib.patches.PathPatch.
Example (using Shapely Point and a matplotlib axes):
>>> b = Point(0, 0).buffer(1.0)
>>> patch = PolygonPatch(b, fc='blue', ec='blue', alpha=0.5)
>>> axis.add_patch(patch)
"""
return PathPatch(PolygonPath(polygon), **kwargs)
| mit |
rajat1994/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually, in the feature space, a comparison of the
results obtained using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
silky/sms-tools | lectures/09-Sound-description/plots-code/k-means.py | 25 | 1714 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
from scipy.cluster.vq import vq, kmeans, whiten
from numpy import random
import pickle
n = 30
features = np.hstack((np.array([np.random.normal(-2,1.1,n), np.random.normal(-2,1.1,n)]), np.array([np.random.normal(2,1.5,n), np.random.normal(2,1.5,n)])))
whitened = np.transpose(features)
nClusters = 2
arr = np.arange(whitened.shape[0])
np.random.shuffle(arr)
seeds = np.array([[-2, 1], [2, -1]])
color = [ 'r', 'c', 'c', 'm']
plt.figure(1, figsize=(9.5, 4))
plt.subplot(1,3,1)
plt.scatter(whitened[:,0],whitened[:,1], c='b', alpha=0.75, s=50, edgecolor='none')
plt.subplot(1,3,2)
clusResults = -1*np.ones(whitened.shape[0])
for ii in range(whitened.shape[0]):
diff = seeds - whitened[ii,:]
diff = np.sum(np.power(diff,2), axis = 1)
indMin = np.argmin(diff)
clusResults[ii] = indMin
for pp in range(nClusters):
plt.scatter(whitened[clusResults==pp,0],whitened[clusResults==pp,1], c=color[pp], alpha=0.75, s=50, edgecolor='none')
plt.scatter(seeds[:,0],seeds[:,1], c=color[:nClusters], alpha=1, s=80)
plt.subplot(1,3,3)
centroids, distortion = kmeans(whitened, seeds, iter=40)
clusResults = -1*np.ones(whitened.shape[0])
for ii in range(whitened.shape[0]):
diff = centroids - whitened[ii,:]
diff = np.sum(np.power(diff,2), axis = 1)
indMin = np.argmin(diff)
clusResults[ii] = indMin
for pp in range(nClusters):
plt.scatter(whitened[clusResults==pp,0],whitened[clusResults==pp,1], c=color[pp], s=50, alpha=0.75, edgecolor='none')
plt.scatter(centroids[:,0],centroids[:,1], c=color[:nClusters], alpha=1, s=80)
plt.tight_layout()
plt.savefig('k-means.png')
plt.show()
| agpl-3.0 |
OSHI7/Learning1 | VoipReadVer.py | 1 | 2741 | from voipms import VoipMs
from datetime import datetime
import time
import matplotlib.pyplot as plt
import matplotlib
#from Utils import showModulePath
import Utils
def sendSMS(src, dst, msg):
client.dids.send.sms(src,dst, msg)
filedata=Utils.getPWFile()
Utils.showModulePath(VoipMs)
#raise AssertionError("Unexpected value of 'distance'!", 11)
#raise Exception('bad thing happened')
#Load the JSON file with pwrd
client = VoipMs(filedata['un'], filedata['pw'] )
status=client.accounts.get.registration_status('136817_CELL3')
print(status)
print('now sending sms')
sendSMS(filedata['DID'], filedata['DID'], 'AHOY DUDE')
import DataTest
#from voipms import VoipMs
#client = VoipMs('', '')
a=client.accounts.get.registration_status('136817_CELL3')
print('cell phone registered? ' + a['registered'])
'''
a=client.accounts.get.registration_status('136817_HOME')
print('home phone registered? ' + a['registered'])
a=client.accounts.get.registration_status('136817_CELL')
print('\defunct phone registered? ' + a['registered'])
'''
TIME_ARRAY=[]
REGISTRATION_ARRAY=[]
VOIPMS_LOGFILE='VoipMS_Logfile.csv'
with open(VOIPMS_LOGFILE, "a", newline="\r\n") as text_file:
text_file.write('*'*10 + "\n")
timeValue=datetime.now().strftime('%Y-%m-%d, %H:%M:%S')
text_file.write('NEW LOG STARTED AT: ' + timeValue +'\n' )
i=-1
while 1:
i=i+1
timeValue=datetime.now().strftime('%Y-%m-%d, %H:%M:%S')
TIME_ARRAY.append(timeValue) #Store the data
# ref: https://stackoverflow.com/questions/26455616/how-can-i-create-basic-timestamps-or-dates-python-3-4
x2 = [datetime.strptime(elem, '%Y-%m-%d, %H:%M:%S') for elem in TIME_ARRAY]
dates=matplotlib.dates.date2num(x2)
try:
status=client.accounts.get.registration_status('136817_CELL3')
except:
print('Iteration skipped! could not log into voip.ms..retrying')
continue
#print('cell phone registered? ' + status['registered'])
registerstatus= int(status['registered']=='yes')
REGISTRATION_ARRAY.append(registerstatus)
outstring=timeValue + ", CellStatus , " + "{first}".format(first=registerstatus)
print(outstring)
#Append contents to file
with open(VOIPMS_LOGFILE, "a", newline="\r\n") as text_file:
text_file.write(outstring +'\n')
#print(timeValue + ", CellStatus= " + "{first}".format(first=registerstatus))
#Update plot
'''
# ref https://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib
plt.ion()
matplotlib.pyplot.plot_date(dates, REGISTRATION_ARRAY)
(fig, ax) = plt.subplots(1, 1)
ax.plot(dates, REGISTRATION_ARRAY)
fig.show()
plt.pause(0.05)
'''
time.sleep(1)
| mit |
Saurabh7/shogun | examples/undocumented/python_modular/graphical/interactive_svr_demo.py | 16 | 11301 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib import mpl
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from modshogun import *
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
self.data.add_example(event.xdata, event.ydata)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "LinearKernel":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "PolynomialKernel":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "GaussianKernel":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def train_svm(self):
width = float(self.sigma.text())
degree = int(self.degree.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = RegressionLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
if kernel_name == "LinearKernel":
gk = LinearKernel(train, train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "PolynomialKernel":
gk = PolyKernel(train, train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "GaussianKernel":
gk = GaussianKernel(train, train, width)
cost = float(self.cost.text())
tubeeps = float(self.tubeeps.text())
print "cost", cost
svm = LibSVR(cost, tubeeps, gk, lab)
svm.train()
svm.set_epsilon(1e-2)
x=numpy.linspace(-5.0,5.0,100)
y=svm.apply(RealFeatures(numpy.array([x]))).get_labels()
self.axes.plot(x,y,'r-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
cost_label = QLabel('C')
#self.cost = QSpinBox()#QLineEdit()
self.cost = QLineEdit()
self.cost.setText("1.0")
#self.cost.setMinimum(1)
spin_label2 = QLabel('tube')
self.tubeeps = QLineEdit()
self.tubeeps.setText("0.1")
spin_label3 = QLabel('sigma')
self.sigma = QLineEdit()
self.sigma.setText("1.2")
#self.sigma.setMinimum(1)
spin_label4 = QLabel('d')
self.degree = QLineEdit()
self.degree.setText("2")
#self.sigma.setMinimum(1)
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(cost_label)
spins_hbox.addWidget(self.cost)
spins_hbox.addWidget(spin_label2)
spins_hbox.addWidget(self.tubeeps)
spins_hbox.addWidget(spin_label3)
spins_hbox.addWidget(self.sigma)
spins_hbox.addWidget(spin_label4)
spins_hbox.addWidget(self.degree)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Train SVR")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "GaussianKernel")
self.kernel_combo.insertItem(-1, "PolynomialKernel")
self.kernel_combo.insertItem(-1, "LinearKernel")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.kernel_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1 = []
self.x2 = []
def get_stats(self):
num = len(self.x1)
str_num = "num examples: %i" % num
return (str_num, str_num)
def get_labels(self):
return numpy.array(self.x2, dtype=numpy.float64)
def get_examples(self):
num = len(self.x1)
examples = numpy.zeros((1,num))
for i in xrange(num):
examples[0,i] = self.x1[i]
return examples
def add_example(self, x1, x2):
self.x1.append(x1)
self.x2.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| mit |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/doc/mpl_examples/pylab_examples/pythonic_matplotlib.py | 9 | 2425 | #!/usr/bin/env python
"""
Some people prefer to write more pythonic, object oriented, code
rather than use the pylab interface to matplotlib. This example shows
you how.
Unless you are an application developer, I recommend using part of the
pylab interface, particularly the figure, close, subplot, axes, and
show commands. These hide a lot of complexity from you that you don't
need to see in normal figure creation, like instantiating DPI
instances, managing the bounding boxes of the figure elements,
creating and realizing GUI windows and embedding figures in them.
If you are an application developer and want to embed matplotlib in
your application, follow the lead of examples/embedding_in_wx.py,
examples/embedding_in_gtk.py or examples/embedding_in_tk.py. In this
case you will want to control the creation of all your figures,
embedding them in application windows, etc.
If you are a web application developer, you may want to use the
example in webapp_demo.py, which shows how to use the backend agg
figure canvase directly, with none of the globals (current figure,
current axes) that are present in the pylab interface. Note that
there is no reason why the pylab interface won't work for web
application developers, however.
If you see an example in the examples dir written in pylab interface,
and you want to emulate that using the true python method calls, there
is an easy mapping. Many of those examples use 'set' to control
figure properties. Here's how to map those commands onto instance
methods
The syntax of set is
setp(object or sequence, somestring, attribute)
if called with an object, set calls
object.set_somestring(attribute)
if called with a sequence, set does
for object in sequence:
object.set_somestring(attribute)
So for your example, if a is your axes object, you can do
a.set_xticklabels([])
a.set_yticklabels([])
a.set_xticks([])
a.set_yticks([])
"""
from pylab import figure, show
from numpy import arange, sin, pi
t = arange(0.0, 1.0, 0.01)
fig = figure(1)
ax1 = fig.add_subplot(211)
ax1.plot(t, sin(2*pi*t))
ax1.grid(True)
ax1.set_ylim( (-2,2) )
ax1.set_ylabel('1 Hz')
ax1.set_title('A sine wave or two')
for label in ax1.get_xticklabels():
label.set_color('r')
ax2 = fig.add_subplot(212)
ax2.plot(t, sin(2*2*pi*t))
ax2.grid(True)
ax2.set_ylim( (-2,2) )
l = ax2.set_xlabel('Hi mom')
l.set_color('g')
l.set_fontsize('large')
show()
| mit |
bjackman/lisa | libs/utils/wa_results_collector.py | 1 | 47802 | # Copyright 2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple, defaultdict
import csv
import json
import numpy as np
import re
import os
import pandas as pd
import subprocess
import logging
import warnings
from scipy.stats import ttest_ind
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex
from conf import LisaLogging
from bart.common.Utils import area_under_curve
from devlib.target import KernelVersion
from trappy.utils import handle_duplicate_index
from IPython.display import display
from trace import Trace
from git import Git
class WaResultsCollector(object):
"""
Collects, analyses and visualises results from multiple WA3 directories
Takes a list of output directories from Workload Automation 3 and parses
them. Finds metrics reported by WA itself, and extends those metrics with
extra detail extracted from ftrace files, energy instrumentation output, and
workload-specific artifacts that are found in the output.
Results can be grouped according to the following terms:
- 'metric' is a specific measurable quantity such as a single frame's
rendering time or the average energy consumed during a workload run.
- 'workload' is the general name of a workload such as 'jankbench' or
'youtube'.
- 'test' is a more specific identification for workload - for example this
might identify one of Jankbench's sub-benchmarks, or specifically playing
a certain video on Youtube for 30s.
WaResultsCollector ultimately derives 'test' names from the
'classifiers'::'test' field of the WA3 agenda file's 'workloads' entries.
- 'tag' is an identifier for a set of run-time target configurations that
the target was run under. For example there might exist one 'tag'
identifying running under the schedutil governor and another for the
performance governor.
WaResultsCollector ultimately derives 'tag' names from the 'classifiers'
field of the WA3 agenda file's 'sections' entries.
- 'kernel' identifies the kernel that was running when the metric was
collected. This may be a SHA1 or a symbolic ref (branch/tag) derived from
a provided Git repository. To try to keep identifiers readable, common
prefixes of refs are removed: if the raw refs are 'test/foo/bar' and
'test/foo/baz', they will be referred to just as 'bar' and 'baz'.
Aside from the provided helper attributes, all metrics are exposed in a
DataFrame as the ``results_df`` attribute.
:param wa_dirs: List of paths to WA3 output directories or a regexp of WA3
output directories names to consider starting from the
specified base_path
:type wa_dirs: str
:param base_dir: The path of a directory containing a collection of WA3
output directories
:type base_dir: str
:param platform: Optional LISA platform description. If provided, used to
enrich extra metrics gleaned from trace analysis.
:param kernel_repo_path: Optional path to kernel repository. WA3 reports the
SHA1 of the kernel that workloads were run against. If this
param is provided, the repository is search for symbolic
references to replace SHA1s in data representation. This is
purely to make the output more manageable for humans.
:param parse_traces: This class uses LISA to parse and analyse ftrace files
for extra metrics. With multiple/large traces this
can take some time. Set this param to False to disable
trace parsing.
:param use_cached_trace_metrics: This class uses LISA to parse and analyse
ftrace files for extra metrics. With multiple/large traces
this can take some time, so the extracted metrics are
cached in the provided output directories. Set this param
to False to disable this caching.
"""
def __init__(self, base_dir=None, wa_dirs=".*", platform=None,
kernel_repo_path=None, parse_traces=True,
use_cached_trace_metrics=True):
self._log = logging.getLogger('WaResultsCollector')
if base_dir:
base_dir = os.path.expanduser(base_dir)
if not isinstance(wa_dirs, basestring):
raise ValueError(
'If base_dir is provided, wa_dirs should be a regexp')
regex = wa_dirs
wa_dirs = self._list_wa_dirs(base_dir, regex)
if not wa_dirs:
raise ValueError("Couldn't find any WA results matching '{}' in {}"
.format(regex, base_dir))
else:
if not hasattr(wa_dirs, '__iter__'):
raise ValueError(
'if base_dir is not provided, wa_dirs should be a list of paths')
wa_dirs = [os.path.expanduser(p) for p in wa_dirs]
self.platform = platform
self.parse_traces = parse_traces
if not self.parse_traces:
self._log.warning("Trace parsing disabled")
self.use_cached_trace_metrics = use_cached_trace_metrics
df = pd.DataFrame()
for wa_dir in wa_dirs:
df = df.append(self._read_wa_dir(wa_dir))
kernel_refs = {}
if kernel_repo_path:
for sha1 in df['kernel_sha1'].unique():
ref = Git.find_shortest_symref(kernel_repo_path, sha1)
if ref:
kernel_refs[sha1] = ref
common_prefix = os.path.commonprefix(kernel_refs.values())
for sha1, ref in kernel_refs.iteritems():
kernel_refs[sha1] = ref[len(common_prefix):]
df['kernel'] = df['kernel_sha1'].replace(kernel_refs)
self.results_df = df
def _list_wa_dirs(self, base_dir, wa_dirs_re):
dirs = []
self._log.info("Processing WA3 dirs matching [%s], rooted at %s",
wa_dirs_re, base_dir)
wa_dirs_re = re.compile(wa_dirs_re)
for subdir in os.listdir(base_dir):
dir = os.path.join(base_dir, subdir)
if not os.path.isdir(dir) or not wa_dirs_re.search(subdir):
continue
# WA3 results dirs contains a __meta directory at the top level.
if '__meta' not in os.listdir(dir):
                self._log.warning('Ignoring %s, does not contain __meta directory', dir)
continue
dirs.append(dir)
return dirs
def _read_wa_dir(self, wa_dir):
"""
Get a DataFrame of metrics from a single WA3 output directory.
Includes the extra metrics derived from workload-specific artifacts and
ftrace files.
Columns returned:
kernel_sha1,kernel,id,workload,tag,test,iteration,metric,value,units
"""
# A WA output directory looks something like:
#
# wa_output/
# |- __meta/
# | | - jobs.json
# | | (some other bits)
# |- results.csv
# |- pelt-wk1-jankbench-1/
# | | - result.json
# | | (other results from iteration 1 of pelt-wk1, which is a
# | | jankbench job)
# |- pelt-wk1-jankbench-2/
# [etc]
# results.csv contains all the metrics reported by WA for all jobs.
df = pd.read_csv(os.path.join(wa_dir, 'results.csv'))
# __meta/jobs.json describes the jobs that were run - we can use this to
# find extra artifacts (like traces and detailed energy measurement
# data) from the jobs, which we'll use to add additional metrics that WA
# didn't report itself.
with open(os.path.join(wa_dir, '__meta', 'jobs.json')) as f:
jobs = json.load(f)['jobs']
subdirs_done = []
# Keep track of how many times we've seen each job id so we know which
# iteration to look at (If we use the proper WA3 API this awkwardness
# isn't necessary).
next_iteration = defaultdict(lambda: 1)
# Keep track of which jobs we skipped for each iteration
skipped_jobs = defaultdict(lambda: [])
# Dicts mapping job IDs to things determined about the job - this will
# be used to add extra columns to the DataFrame (that aren't reported
# directly in WA's results.csv)
tag_map = {}
test_map = {}
job_dir_map = {}
for job in jobs:
workload = job['workload_name']
job_id = job['id']
# If there's a 'tag' in the 'classifiers' object, use that to
# identify the runtime configuration. If not, use a representation
# of the full key=value pairs.
classifiers = job['classifiers'] or {}
if 'test' in classifiers:
# If the workload spec has a 'test' classifier, use that to
# identify it.
test = classifiers.pop('test')
elif 'test' in job['workload_parameters']:
# If not, some workloads have a 'test' workload_parameter, try
# using that
test = job['workload_parameters']['test']
else:
# Otherwise just use the workload name.
# This isn't ideal because it means the results from jobs with
# different workload parameters will be amalgamated.
test = workload
rich_tag = ';'.join('{}={}'.format(k, v) for k, v in classifiers.iteritems())
tag = classifiers.get('tag', rich_tag)
if job_id in tag_map:
# Double check I didn't do a stupid
if tag_map[job_id] != tag:
raise RuntimeError('Multiple tags ({}, {}) found for job ID {}'
.format(tag, tag_map[job_id], job_id))
tag_map[job_id] = tag
if job_id in test_map:
# Double check I didn't do a stupid
if test_map[job_id] != test:
raise RuntimeError('Multiple tests ({}, {}) found for job ID {}'
.format(test, test_map[job_id], job_id))
test_map[job_id] = test
iteration = next_iteration[job_id]
next_iteration[job_id] += 1
job_dir = os.path.join(wa_dir,
'-'.join([job_id, workload, str(iteration)]))
job_dir_map[job_id] = job_dir
# Jobs can fail due to target misconfiguration or other problems,
# without preventing us from collecting the results for the jobs
# that ran OK.
with open(os.path.join(job_dir, 'result.json')) as f:
job_result = json.load(f)
if job_result['status'] == 'FAILED':
skipped_jobs[iteration].append(job_id)
continue
extra_df = self._get_extra_job_metrics(job_dir, workload)
if extra_df.empty:
continue
extra_df.loc[:, 'workload'] = workload
extra_df.loc[:, 'iteration'] = iteration
extra_df.loc[:, 'id'] = job_id
extra_df.loc[:, 'tag'] = tag
extra_df.loc[:, 'test'] = test
df = df.append(extra_df)
for iteration, job_ids in skipped_jobs.iteritems():
self._log.warning("Skipped failed iteration %d for jobs:", iteration)
self._log.warning(" %s", ', '.join(job_ids))
df['tag'] = df['id'].replace(tag_map)
df['test'] = df['id'].replace(test_map)
# TODO: This is a bit lazy: we're storing the directory that every
# single metric came from in a DataFrame column. That's redundant really
# - instead, to get from a row in results_df to a job output directory,
# we should just store a mapping from kernel identifiers to wa_output
# directories, then derive at the job dir from that mapping plus the
# job_id+workload+iteration in the results_df row. This works fine for
# now, though - that refactoring would probably belong alongside a
# refactoring to use WA's own API for reading output directories.
df['_job_dir'] = df['id'].replace(job_dir_map)
df.loc[:, 'kernel_sha1'] = self._wa_get_kernel_sha1(wa_dir)
return df
def _get_trace_metrics(self, trace_path):
"""
        Parse a trace (or use cached results) and extract extra metrics from it
Returns a DataFrame with columns:
metric,value,units
"""
cache_path = os.path.join(os.path.dirname(trace_path), 'lisa_trace_metrics.csv')
if self.use_cached_trace_metrics and os.path.exists(cache_path):
return pd.read_csv(cache_path)
# I wonder if this should go in LISA itself? Probably.
metrics = []
events = ['irq_handler_entry', 'cpu_frequency', 'nohz_kick', 'sched_switch',
'sched_load_cfs_rq', 'sched_load_avg_task', 'thermal_temperature']
trace = Trace(self.platform, trace_path, events)
if hasattr(trace.data_frame, 'cpu_wakeups'): # Not merged in LISA yet
metrics.append(('cpu_wakeup_count', len(trace.data_frame.cpu_wakeups()), None))
# Helper to get area under curve of multiple CPU active signals
def get_cpu_time(trace, cpus):
df = pd.DataFrame([trace.getCPUActiveSignal(cpu) for cpu in cpus])
return df.sum(axis=1).sum(axis=0)
clusters = trace.platform.get('clusters')
if clusters:
for cluster in clusters.values():
name = '-'.join(str(c) for c in cluster)
df = trace.data_frame.cluster_frequency_residency(cluster)
if df is None or df.empty:
self._log.warning("Can't get cluster freq residency from %s",
trace.data_dir)
else:
df = df.reset_index()
avg_freq = (df.frequency * df.time).sum() / df.time.sum()
metric = 'avg_freq_cluster_{}'.format(name)
metrics.append((metric, avg_freq, 'MHz'))
df = trace.data_frame.trace_event('cpu_frequency')
df = df[df.cpu == cluster[0]]
metrics.append(('freq_transition_count_{}'.format(name), len(df), None))
active_time = area_under_curve(trace.getClusterActiveSignal(cluster))
metrics.append(('active_time_cluster_{}'.format(name),
active_time, 'seconds'))
metrics.append(('cpu_time_cluster_{}'.format(name),
get_cpu_time(trace, cluster), 'cpu-seconds'))
metrics.append(('cpu_time_total',
get_cpu_time(trace, range(trace.platform['cpus_count'])),
'cpu-seconds'))
event = None
if trace.hasEvents('sched_load_cfs_rq'):
event = 'sched_load_cfs_rq'
row_filter = lambda r: r.path == '/'
column = 'util'
elif trace.hasEvents('sched_load_avg_cpu'):
event = 'sched_load_avg_cpu'
row_filter = lambda r: True
column = 'util_avg'
if event:
df = trace.data_frame.trace_event(event)
util_sum = (handle_duplicate_index(df)[row_filter]
.pivot(columns='cpu')[column].ffill().sum(axis=1))
avg_util_sum = area_under_curve(util_sum) / (util_sum.index[-1] - util_sum.index[0])
metrics.append(('avg_util_sum', avg_util_sum, None))
if trace.hasEvents('thermal_temperature'):
df = trace.data_frame.trace_event('thermal_temperature')
for zone, zone_df in df.groupby('thermal_zone'):
metrics.append(('tz_{}_start_temp'.format(zone),
zone_df.iloc[0]['temp_prev'],
'milliCelcius'))
                if len(zone_df) == 1:  # Avoid division by 0
avg_tmp = zone_df['temp'].iloc[0]
else:
avg_tmp = (area_under_curve(zone_df['temp'])
/ (zone_df.index[-1] - zone_df.index[0]))
metrics.append(('tz_{}_avg_temp'.format(zone),
avg_tmp,
'milliCelcius'))
ret = pd.DataFrame(metrics, columns=['metric', 'value', 'units'])
ret.to_csv(cache_path, index=False)
return ret
def _get_extra_job_metrics(self, job_dir, workload):
"""
Get extra metrics (not reported directly by WA) from a WA job output dir
Returns a DataFrame with columns:
metric,value,units
"""
# return
# value,metric,units
metrics_df = pd.DataFrame()
artifacts = self._read_artifacts(job_dir)
if self.parse_traces and 'trace-cmd-bin' in artifacts:
metrics_df = metrics_df.append(
self._get_trace_metrics(artifacts['trace-cmd-bin']))
if 'jankbench_results_csv' in artifacts:
df = pd.read_csv(artifacts['jankbench_results_csv'])
df = pd.DataFrame({'value': df['total_duration']})
df.loc[:, 'metric'] = 'frame_total_duration'
df.loc[:, 'units'] = 'ms'
metrics_df = metrics_df.append(df)
# WA's metrics model just exports overall energy metrics, not individual
# samples. We're going to extend that with individual samples so if you
# want to you can see how much variation there was in energy usage.
# So we'll look for the actual CSV files and parse that by hand.
# The parsing necessary is specific to the energy measurement backend
# that was used, which WA doesn't currently report directly.
# TODO: once WA's reporting of this data has been cleaned up a bit I
# think we can simplify this.
for artifact_name, path in artifacts.iteritems():
if artifact_name.startswith('energy_instrument_output'):
df = pd.read_csv(path)
if 'device_power' in df.columns:
# Looks like this is from an ACME
df = pd.DataFrame({'value': df['device_power']})
# Figure out what to call the sample metrics. If the
# artifact name has something extra, that will be the
# channel (IIO device) name. Use that to differentiate where
# the samples came from. If not just call it
# 'device_power_sample'.
device_name = artifact_name[len('energy_instrument_output') + 1:]
name_extra = device_name or 'device'
df.loc[:, 'metric'] = '{}_power_sample'.format(name_extra)
df.loc[:, 'units'] = 'watts'
metrics_df = metrics_df.append(df)
elif 'output_power' in df.columns and 'USB_power' in df.columns:
# Looks like this is from a Monsoon
# For monsoon the USB and device power are collected
# together with the same timestamps, so we can just add them
# up.
power_samples = df['output_power'] + df['USB_power']
df = pd.DataFrame({'value': power_samples})
df.loc[:, 'metric'] = 'device_power_sample'
df.loc[:, 'units'] = 'watts'
metrics_df = metrics_df.append(df)
return metrics_df
def _wa_get_kernel_sha1(self, wa_dir):
"""
Find the SHA1 of the kernel that a WA3 run was run against
"""
with open(os.path.join(wa_dir, '__meta', 'target_info.json')) as f:
target_info = json.load(f)
return KernelVersion(target_info['kernel_release']).sha1
def _select(self, tag='.*', kernel='.*', test='.*'):
_df = self.results_df
_df = _df[_df.tag.str.contains(tag)]
_df = _df[_df.kernel.str.contains(kernel)]
_df = _df[_df.test.str.contains(test)]
return _df
@property
    def kernels(self):
return self.results_df['kernel'].unique()
@property
def workloads(self):
return self.results_df['workload'].unique()
@property
def tags(self):
return self.results_df['tag'].unique()
def tests(self, workload=None):
df = self.results_df
if workload:
df = df[df['workload'] == workload]
return df['test'].unique()
def workload_available_metrics(self, workload):
return (self.results_df
.groupby('workload').get_group(workload)
['metric'].unique())
def _get_metric_df(self, workload, metric, tag, kernel, test):
"""
Common helper for getting results to plot for a given metric
"""
df = self._select(tag, kernel, test)
if df.empty:
            self._log.warning("No data to plot for (tag: %s, kernel: %s, test: %s)",
                              tag, kernel, test)
return None
valid_workloads = df.workload.unique()
if workload not in valid_workloads:
self._log.warning("No data for [%s] workload", workload)
self._log.info("Workloads with data, for the specified filters, are:")
self._log.info(" %s", ','.join(valid_workloads))
return None
df = df[df['workload'] == workload]
valid_metrics = df.metric.unique()
if metric not in valid_metrics:
self._log.warning("No metric [%s] collected for workoad [%s]",
metric, workload)
self._log.info("Metrics with data, for the specied filters, are:")
self._log.info(" %s", ', '.join(valid_metrics))
return None
df = df[df['metric'] == metric]
units = df['units'].unique()
if len(units) > 1:
            raise RuntimeError('Found different units for workload "{}" metric "{}": {}'
.format(workload, metric, units))
return df
SortBy = namedtuple('SortBy', ['key', 'params', 'column'])
def _get_sort_params(self, sort_on):
"""
        Validate a sort criterion and return the parameters required by the
boxplot and report methods.
"""
valid_sort = ['count', 'mean', 'std', 'min', 'max']
        # Check whether a valid percentile string has been requested
match = re.match('^(?P<quantile>\d{1,3})\%$', sort_on)
if match:
quantile = int(match.group('quantile'))
if quantile < 1 or quantile > 100:
raise ValueError("Error sorting data: Quantile value out of range [1..100]")
return self.SortBy('quantile', {'q': quantile/100.}, sort_on)
# Otherwise, verify if it's a valid Pandas::describe()'s column name
if sort_on in valid_sort:
return self.SortBy(sort_on, {}, sort_on)
raise ValueError(
"sort_on={} not supported, allowed values are percentile or {}"
.format(sort_on, valid_sort))
def boxplot(self, workload, metric,
tag='.*', kernel='.*', test='.*',
by=['test', 'tag', 'kernel'],
sort_on='mean', ascending=False,
xlim=None):
"""
Display boxplots of a certain metric
Creates horizontal boxplots of metrics in the results. Check
``workloads`` and ``workload_available_metrics`` to find the available
workloads and metrics. Check ``tags``, ``tests`` and ``kernels``
to find the names that results can be filtered against.
By default, the box with the lowest mean value is plotted at the top of
the graph, this can be customized with ``sort_on`` and ``ascending``.
:param workload: Name of workload to display metrics for
:param metric: Name of metric to display
:param tag: regular expression to filter tags that should be plotted
:param kernel: regular expression to filter kernels that should be plotted
        :param test: regular expression to filter tests that should be plotted
:param by: List of identifiers to group output as in DataFrame.groupby.
:param sort_on: Name of the statistic to order data for.
Supported values are: count, mean, std, min, max.
You may alternatively specify a percentile to sort on,
this should be an integer in the range [1..100]
formatted as a percentage, e.g. 95% is the 95th
percentile.
:param ascending: When True, boxplots are plotted by increasing values
(lowest-valued boxplot at the top of the graph) of the
specified `sort_on` statistic.
"""
sp = self._get_sort_params(sort_on)
df = self._get_metric_df(workload, metric, tag, kernel, test)
if df is None:
return
gb = df.groupby(by)
# Convert the groupby into a DataFrame with a column for each group
max_group_size = max(len(group) for group in gb.groups.itervalues())
_df = pd.DataFrame()
for group_name, group in gb:
# Need to pad the group's column so that they all have the same
# length
padding_length = max_group_size - len(group)
padding = pd.Series(np.nan, index=np.arange(padding_length))
col = group['value'].append(padding)
col.index = np.arange(max_group_size)
_df[group_name] = col
# Sort the columns
# With default params this puts the box with the lowest mean at the
# bottom.
# NOTE: the not(ascending) condition is required to keep these plots
# aligned with the way describe() reports the stats corresponding to
# each boxplot
sorted_df = getattr(_df, sp.key)(**sp.params)
sorted_df = sorted_df.sort_values(ascending=not(ascending))
_df = _df[sorted_df.index]
# Plot boxes sorted by mean
fig, axes = plt.subplots(figsize=(16,8))
_df.boxplot(ax=axes, vert=False, showmeans=True)
fig.suptitle('')
if xlim:
axes.set_xlim(xlim)
[units] = df['units'].unique()
axes.set_xlabel('{} [{}]'.format(metric, units))
axes.set_title('{}:{}'.format(workload, metric))
plt.show()
return axes
def describe(self, workload, metric,
tag='.*', kernel='.*', test='.*',
by=['test', 'tag', 'kernel'],
sort_on='mean', ascending=False):
"""
Return a DataFrame of statistics for a certain metric
Compute mean, std, min, max and [50, 75, 95, 99] percentiles for
the values collected on each iteration of the specified metric.
Check ``workloads`` and ``workload_available_metrics`` to find the
available workloads and metrics.
Check ``tags``, ``tests`` and ``kernels`` to find the names that
results can be filtered against.
:param workload: Name of workload to display metrics for
:param metric: Name of metric to display
:param tag: regular expression to filter tags that should be plotted
:param kernel: regular expression to filter kernels that should be plotted
        :param test: regular expression to filter tests that should be plotted
:param by: List of identifiers to group output as in DataFrame.groupby.
:param sort_on: Name of the statistic to order data for.
Supported values are: count, mean, std, min, max.
                        You may alternatively specify a percentile to sort on,
                        this should be an integer in the range [1..100]
                        formatted as a percentage, e.g. 95% is the 95th
                        percentile.
:param ascending: When True, the statistics are reported by increasing values
of the specified `sort_on` column
"""
sp = self._get_sort_params(sort_on)
df = self._get_metric_df(workload, metric, tag, kernel, test)
if df is None:
return
        # Add the extra percentile needed for sorting, if one was requested
percentiles = [0.75, 0.95, 0.99]
if sp.params and 'q' in sp.params:
percentiles.append(sp.params['q'])
percentiles = sorted(list(set(percentiles)))
grouped = df.groupby(by)['value']
stats_df = pd.DataFrame(
grouped.describe(percentiles=percentiles))
        # Use consistent formatting independently of the pandas version
if 'value' in stats_df.columns:
# We must be running on a pre-0.20.0 version of pandas.
# unstack will convert the old output format to the new.
# http://pandas.pydata.org/pandas-docs/version/0.20/whatsnew.html#groupby-describe-formatting
# Main difference is that here we have a top-level column
# named 'value'
stats_df = stats_df.unstack()
else:
# Let's add a top-level column named 'value' which will be replaced
# by the actual metric name by the following code
stats_df.columns = pd.MultiIndex.from_product(
[['value'], stats_df.columns])
# Sort entries by the required metric and order value
stats_df.sort_values(by=[('value', sp.column)],
ascending=ascending, inplace=True)
stats_df.rename(columns={'value': metric}, inplace=True)
return stats_df
def report(self, workload, metric,
tag='.*', kernel='.*', test='.*',
by=['test', 'tag', 'kernel'],
sort_on='mean', ascending=False,
xlim=None):
"""
Report a boxplot and a set of statistics for a certain metric
This is a convenience method to call both ``boxplot`` and ``describe``
at the same time to get a consistent graphical and numerical
representation of the values for the specified metric.
Check ``workloads`` and ``workload_available_metrics`` to find the
available workloads and metrics.
Check ``tags``, ``tests`` and ``kernels`` to find the names that
results can be filtered against.
:param workload: Name of workload to display metrics for
:param metric: Name of metric to display
:param tag: regular expression to filter tags that should be plotted
:param kernel: regular expression to filter kernels that should be plotted
        :param test: regular expression to filter tests that should be plotted
:param by: List of identifiers to group output as in DataFrame.groupby.
"""
axes = self.boxplot(workload, metric, tag, kernel, test,
by, sort_on, ascending, xlim)
stats_df = self.describe(workload, metric, tag, kernel, test,
by, sort_on, ascending)
display(stats_df)
return (axes, stats_df)
CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
def _get_cdf(self, data, threshold):
"""
Build the "Cumulative Distribution Function" (CDF) for the given data
"""
# Build the series of sorted values
ser = data.sort_values()
if len(ser) < 1000:
# Append again the last (and largest) value.
# This step is important especially for small sample sizes
# in order to get an unbiased CDF
ser = ser.append(pd.Series(ser.iloc[-1]))
df = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
# Compute percentage of samples above/below the specified threshold
below = float(max(df[:threshold]))
above = 1 - below
return self.CDF(df, threshold, above, below)
def plot_cdf(self, workload='jankbench', metric='frame_total_duration',
threshold=16, tag='.*', kernel='.*', test='.*'):
"""
Display cumulative distribution functions of a certain metric
Draws CDFs of metrics in the results. Check ``workloads`` and
``workload_available_metrics`` to find the available workloads and
metrics. Check ``tags``, ``tests`` and ``kernels`` to find the
names that results can be filtered against.
The most likely use-case for this is plotting frame rendering times
under Jankbench, so default parameters are provided to make this easy.
:param workload: Name of workload to display metrics for
:param metric: Name of metric to display
:param threshold: Value to highlight in the plot - the likely use for
this is highlighting the maximum acceptable
frame-rendering time in order to see at a glance the
rough proportion of frames that were rendered in time.
:param tag: regular expression to filter tags that should be plotted
:param kernel: regular expression to filter kernels that should be plotted
        :param test: regular expression to filter tests that should be plotted
"""
df = self._get_metric_df(workload, metric, tag, kernel, test)
if df is None:
return
test_cnt = len(df.groupby(['test', 'tag', 'kernel']))
colors = iter(cm.rainbow(np.linspace(0, 1, test_cnt+1)))
fig, axes = plt.subplots()
axes.axvspan(0, threshold, facecolor='g', alpha=0.1);
labels = []
lines = []
for keys, df in df.groupby(['test', 'tag', 'kernel']):
labels.append("{:16s}: {:32s}".format(keys[2], keys[1]))
color = next(colors)
cdf = self._get_cdf(df['value'], threshold)
[units] = df['units'].unique()
ax = cdf.df.plot(ax=axes, legend=False, xlim=(0,None), figsize=(16, 6),
title='Total duration CDF ({:.1f}% within {} [{}] threshold)'\
.format(100. * cdf.below, threshold, units),
label=test,
color=to_hex(color))
lines.append(ax.lines[-1])
axes.axhline(y=cdf.below, linewidth=1,
linestyle='--', color=to_hex(color))
self._log.debug("%-32s: %-32s: %.1f", keys[2], keys[1], 100.*cdf.below)
axes.grid(True)
axes.legend(lines, labels)
plt.show()
def find_comparisons(self, base_id=None, by='kernel'):
"""
Find metrics that changed between a baseline and variants
The notion of 'variant' and 'baseline' is defined by the `by` param. If
by='kernel', then `base_id` should be a kernel SHA (or whatever key the
'kernel' column in the results_df uses). If by='tag' then `base_id`
should be a WA 'tag id' (as named in the WA agenda).
"""
comparisons = []
# I dunno why I wrote this with a namedtuple instead of just a dict or
# whatever, but it works fine
Comparison = namedtuple('Comparison', ['metric', 'test', 'inv_id',
'base_id', 'base_mean', 'base_std',
'new_id', 'new_mean', 'new_std',
'diff', 'diff_pct', 'pvalue'])
# If comparing by kernel, only check comparisons where the 'tag' is the same
# If comparing by tag, only check where kernel is same
if by == 'kernel':
invariant = 'tag'
elif by == 'tag':
invariant = 'kernel'
else:
raise ValueError('`by` must be "kernel" or "tag"')
available_baselines = self.results_df[by].unique()
if base_id is None:
base_id = available_baselines[0]
if base_id not in available_baselines:
raise ValueError('base_id "{}" not a valid "{}" (available: {}). '
'Did you mean to set by="{}"?'.format(
base_id, by, available_baselines, invariant))
for metric, metric_results in self.results_df.groupby('metric'):
# inv_id will either be the id of the kernel or of the tag,
# depending on the `by` param.
# So wl_inv_results will be the results entries for that workload on
# that kernel/tag
for (test, inv_id), wl_inv_results in metric_results.groupby(['test', invariant]):
gb = wl_inv_results.groupby(by)['value']
if base_id not in gb.groups:
self._log.warning('Skipping - No baseline results for test '
'[%s] %s [%s] metric [%s]',
test, invariant, inv_id, metric)
continue
base_results = gb.get_group(base_id)
base_mean = base_results.mean()
for group_id, group_results in gb:
if group_id == base_id:
continue
# group_id is now a kernel id or a tag (depending on
# `by`). group_results is a slice of all the rows of self.results_df
                    # for a given metric, test, and tag/kernel combination. We
                    # create a Comparison object to show how that metric changed
                    # w.r.t. the baseline.
group_mean = group_results.mean()
mean_diff = group_mean - base_mean
# Calculate percentage difference in mean metric value
if base_mean != 0:
mean_diff_pct = mean_diff * 100. / base_mean
else:
# base mean is 0, can't divide by that.
if group_mean == 0:
# Both are 0 so diff_pct is 0
                            mean_diff_pct = 0
else:
# Tricky one - base value was 0, new value isn't.
# Let's just call it a 100% difference.
mean_diff_pct = 100
if len(group_results) <= 1 or len(base_results) <= 1:
# Can't do ttest_ind if we only have one sample. There
# are proper t-tests for this, but let's just assume the
# worst.
pvalue = 1.0
elif mean_diff == 0:
# ttest_ind also gives a warning if the two data sets
# are the same and have no variance. I don't know why
# that is to be honest, but anyway if there's no
# difference in the mean, we don't care about the
# p-value.
pvalue = 1.0
else:
# Find a p-value which hopefully represents the
# (complement of the) certainty that any difference in
# the mean represents something real.
pvalue = ttest_ind(group_results, base_results, equal_var=False).pvalue
comparisons.append(Comparison(
metric, test, inv_id,
base_id, base_mean, base_results.std(),
group_id, group_mean, group_results.std(),
mean_diff, mean_diff_pct, pvalue))
return pd.DataFrame(comparisons)
def plot_comparisons(self, base_id=None, by='kernel'):
"""
Visualise metrics that changed between a baseline and variants
The notion of 'variant' and 'baseline' is defined by the `by` param. If
by='kernel', then `base_id` should be a kernel SHA (or whatever key the
'kernel' column in the results_df uses). If by='tag' then `base_id`
should be a WA 'tag id' (as named in the WA agenda).
"""
df = self.find_comparisons(base_id=base_id, by=by)
if df.empty:
self._log.error('No comparisons by %s found', by)
if len(self.results_df[by].unique()) == 1:
self._log.warning('There is only one %s in the results', by)
return
# Separate plot for each test (e.g. one plot for Jankbench list_view)
for (test, inv_id), test_comparisons in df.groupby(('test', 'inv_id')):
# Vertical size of plot depends on how many metrics we're comparing
# and how many things (kernels/tags) we're comparing metrics for.
# a.k.a the total length of the comparisons df.
fig, ax = plt.subplots(figsize=(15, len(test_comparisons) / 2.))
# pos is used as the Y-axis. The y-axis is a discrete axis with a
# point for each of the metrics we're comparing. matplotlib needs
# that in numerical form.
# We also have one more tick on the Y-axis than we actually need -
# this is a terrible hack which is necessary because when we set the
# opacity of the first bar, it sets the opacity of the legend. So we
# introduce a dummy bar with a value of 0 and an opacity of 1.
all_metrics = test_comparisons['metric'].unique()
pos = np.arange(-1, len(all_metrics))
# At each point on the discrete y-axis we'll have one bar for each
# comparison: one per kernel/tag (depending on the `by` param), minus
# one for the baseline.
# If there are more bars we'll need to make them thinner so they
# fit. The sum of the bars' thicknesses should be 60% of a tick on
# the 'y-axis'.
            thickness = 0.6 / len(test_comparisons.groupby('new_id'))
# TODO: something is up with the calculations above, because there's
# always a bit of empty space at the bottom of the axes.
gb = test_comparisons.groupby('new_id')
colors = cm.rainbow(np.linspace(0, 1, len(gb)))
for i, (group, gdf) in enumerate(gb):
def get_dummy_row(metric):
return pd.DataFrame({col: 0 for col in gdf.columns}, index=[metric])
missing_metrics = set(all_metrics) - set(gdf['metric'].unique())
gdf = gdf.set_index('metric')
for missing_metric in missing_metrics:
self._log.warning(
"Data missing, can't compare metric [{}] for {} [{}]"
.format(missing_metric, by, group))
gdf = gdf.append(get_dummy_row(missing_metric))
# Ensure the comparisons are in the same order for each group
gdf = gdf.reindex(all_metrics)
# Append the dummy row we're using to fix the legend opacity
gdf = get_dummy_row('').append(gdf)
# For each of the things we're comparing we'll plot a bar chart
# but slightly shifted. That's how we get multiple bars on each
# y-axis point.
bars = ax.barh(bottom=pos + (i * thickness),
width=gdf['diff_pct'],
height=thickness, label=group,
color=colors[i % len(colors)], align='center')
# Decrease the opacity for comparisons with a high p-value
for bar, pvalue in zip(bars, gdf['pvalue']):
bar.set_alpha(1 - (min(pvalue * 10, 0.95)))
# Add some text for labels, title and axes ticks
ax.set_xlabel('Percent difference')
[baseline] = test_comparisons['base_id'].unique()
ax.set_title('{} ({}): Percent difference compared to {} \nopacity depicts p-value'
.format(test, inv_id, baseline))
ax.set_yticklabels(gdf.index.tolist())
ax.set_yticks(pos + thickness / 2)
# ax.set_xlim((-50, 50))
ax.legend(loc='best')
ax.grid(True)
plt.show()
def _read_artifacts(self, job_dir):
with open(os.path.join(job_dir, 'result.json')) as f:
ret = {a['name']: os.path.join(job_dir, a['path'])
for a in json.load(f)['artifacts']}
return ret
def _find_job_dir(self, workload='.*', tag='.*', kernel='.*', test='.*',
iteration=1):
df = self._select(tag, kernel, test)
df = df[df['workload'].str.match(workload)]
job_dirs = df['_job_dir'].unique()
if len(job_dirs) > 1:
raise ValueError("Params for get_artifacts don't uniquely identify a job. "
"for workload='{}' tag='{}' kernel='{}' test='{}' iteration={}, "
"found:\n{}" .format(
workload, tag, kernel, test, iteration, '\n'.join(job_dirs)))
if not job_dirs:
raise ValueError(
"No job found for "
"workload='{}' tag='{}' kernel='{}' test='{}' iteration={}"
.format(workload, tag, kernel, test, iteration))
[job_dir] = job_dirs
return job_dir
def get_artifacts(self, workload='.*', tag='.*', kernel='.*', test='.*',
iteration=1):
"""
Get a dict mapping artifact names to file paths for a specific job.
        The parameters are regular expressions (plus an iteration number) that
        should uniquely identify a single run of a job.
"""
job_dir = self._find_job_dir(workload, tag, kernel, test, iteration)
return self._read_artifacts(job_dir)
def get_artifact(self, artifact_name, workload='.*',
tag='.*', kernel='.*', test='.*',
iteration=1):
"""
Get the path of an artifact attached to a job output.
artifact_name specifies the name of an artifact, e.g. 'trace_bin' to
find the ftrace file from the specific job run. The other parameters
should be used to uniquely identify a run of a job.
"""
job_dir = self._find_job_dir(workload, tag, kernel, test, iteration)
artifacts = self._read_artifacts(job_dir)
        if artifact_name not in artifacts:
raise ValueError("No '{}' artifact found in {} (have {})".format(
artifact_name, job_dir, artifacts.keys()))
return artifacts[artifact_name]
| apache-2.0 |
copperwire/SIMS | plotting_module.py | 1 | 1816 | from file_handler import file_handler
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import numpy as np
class plotter:
def __init__(self, filename):
self.filename = filename
def pathfinder(self):
"""Find full path to filename """
def plot_machine(self):
class_instance = file_handler(self.filename)
class_instance.file_iteration()
data_sets = class_instance.data_conversion()
names = getattr(class_instance, "substances")
if len(names) > 2:
host = host_subplot(111, axes_class = AA.Axes)
plt.subplots_adjust(right = 0.75)
par1 = host.twinx()
par2 = host.twinx()
host.set_yscale("log")
par1.set_yscale("log")
par2.set_yscale("log")
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all = True)
host.set_xlabel(data_sets[0]["x_unit"])
plotty_things = [host, par1, par2]
for data_set, name, things in zip(data_sets, names, plotty_things):
x_val = data_set["data"][0]
y_val = data_set["data"][1]
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
things.set_ylabel(y_unit)
things.plot(x_val, y_val, label = data_set["sample element"])
plt.legend()
plt.show()
else:
data_set = data_sets[0]
x_val = data_set["data"][0]
y_val = data_set["data"][1]
x_val = x_val.copy(order = "C")
x_unit = data_set["x_unit"]
y_unit = data_set["y_unit"]
plt.semilogy(x_val, y_val, label = data_set["sample info"][2], nonposy = "clip")
plt.xlabel(x_unit)
plt.ylabel(y_unit)
plt.legend()
plt.show()
| cc0-1.0 |
spennihana/h2o-3 | h2o-py/tests/testdir_jira/pyunit_pubdev_4723.py | 5 | 1674 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas
import h2o
from tests import pyunit_utils
from pandas.util.testing import assert_frame_equal
TEST_DATASET = pyunit_utils.locate('smalldata/logreg/prostate_missing.csv')
def test_4723():
pandas_frame = pandas.read_csv(TEST_DATASET)
frame = h2o.import_file(TEST_DATASET)
# Ensure that the as_data_frame method does not modify the frame
assert_frame_equal(pandas_frame, frame.as_data_frame())
# Now insert some missing values
expected_rows_count = frame['RACE'].shape[0]
# Check that the shape of the data frames is not modified
pandas_default_rows_count = frame['RACE'].as_data_frame(use_pandas=True).shape[0]
    assert pandas_default_rows_count == expected_rows_count, "Result's rows count when using pandas with default na_value must be equal to expected_rows_count. Expected: %s, actual: %s" % (
expected_rows_count, pandas_default_rows_count)
no_pandas_default_rows_count = len(frame['RACE'].as_data_frame(use_pandas=False, header=False))
assert no_pandas_default_rows_count == expected_rows_count, "Result's rows count when NOT using pandas must be equal to expected_rows_count. Expected: %s, actual: %s" % (
expected_rows_count, no_pandas_default_rows_count)
def test_npe_string_vec():
f = h2o.create_frame(string_fraction = 1)
f['C1'].insert_missing_values(1)
print(f['C1'][0,0])
if __name__ == "__main__":
pyunit_utils.standalone_test(test_4723)
pyunit_utils.standalone_test(test_npe_string_vec)
else:
test_4723()
test_npe_string_vec()
| apache-2.0 |
cms-externals/lhapdf | examples/pdf-plot.py | 1 | 1029 | #! /usr/bin/env python
import math, numpy
import lhapdf
import matplotlib.pyplot as plt
q = math.sqrt(6400.0)
pdfsets = ["cteq6ll.LHpdf", "MSTW2008lo68cl.LHgrid", "MRST2001lo.LHgrid", "MRST2007lomod.LHgrid", "MRSTMCal.LHgrid"]
#pdfsets = ["cteq6ll.LHpdf", "MRST2001lo.LHgrid", "MRST2007lomod.LHgrid", "MRSTMCal.LHgrid"]
partons = { 0 : "gluon", 2 : "up" }
NPOINTS = 1000
xs = numpy.logspace(-4, -0.001, NPOINTS)
plt.figure(figsize=(13,7))
for n, parton in enumerate(sorted(partons.keys())):
plt.subplot(1, len(partons), n+1)
lines = []
for pdfset in pdfsets:
lhapdf.initPDFSetByName(pdfset)
lhapdf.initPDF(0)
xfxs = numpy.zeros([NPOINTS])
for i, x in enumerate(xs):
xfx = lhapdf.xfx(x, q, parton)
xfxs[i] = xfx
l = plt.plot(xs, xfxs)
lines.append(l)
plt.xscale("log")
#plt.ylim(0.5, 3)
plt.legend(lines, pdfsets)
plt.title(partons[parton])
plt.xlabel("$x$")
if n == 0:
plt.ylabel("$x f(x, Q^2)$")
plt.show()
| gpl-2.0 |
kevin-intel/scikit-learn | examples/inspection/plot_partial_dependence.py | 6 | 10585 | """
===============================================================
Partial Dependence and Individual Conditional Expectation Plots
===============================================================
Partial dependence plots show the dependence between the target function [2]_
and a set of features of interest, marginalizing over the values of all other
features (the complement features). Due to the limits of human perception, the
size of the set of features of interest must be small (usually, one or two)
thus they are usually chosen among the most important features.
Similarly, an individual conditional expectation (ICE) plot [3]_
shows the dependence between the target function and a feature of interest.
However, unlike partial dependence plots, which show the average effect of the
features of interest, ICE plots visualize the dependence of the prediction on a
feature for each :term:`sample` separately, with one line per sample.
Only one feature of interest is supported for ICE plots.
This example shows how to obtain partial dependence and ICE plots from a
:class:`~sklearn.neural_network.MLPRegressor` and a
:class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the
California housing dataset. The example is taken from [1]_.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
.. [3] Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E., Peeking Inside
the Black Box: Visualizing Statistical Learning With Plots of
Individual Conditional Expectation. (2015) Journal of Computational and
Graphical Statistics, 24(1): 44-65 (https://arxiv.org/abs/1309.6392)
"""
print(__doc__)
# %%
# California Housing data preprocessing
# -------------------------------------
#
# Center target to avoid gradient boosting init bias: gradient boosting
# with the 'recursion' method does not account for the initial estimator
# (here the average target, by default).
import pandas as pd
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
cal_housing = fetch_california_housing()
X = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names)
y = cal_housing.target
y -= y.mean()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=0
)
# %%
# 1-way partial dependence with different models
# ----------------------------------------------
#
# In this section, we will compute 1-way partial dependence with two different
# machine-learning models: (i) a multi-layer perceptron and (ii) a
# gradient-boosting. With these two models, we illustrate how to compute and
# interpret both partial dependence plot (PDP) and individual conditional
# expectation (ICE).
#
# Multi-layer perceptron
# ......................
#
# Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute
# single-variable partial dependence plots.
from time import time
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import QuantileTransformer
from sklearn.neural_network import MLPRegressor
print("Training MLPRegressor...")
tic = time()
est = make_pipeline(QuantileTransformer(),
MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True))
est.fit(X_train, y_train)
print(f"done in {time() - tic:.3f}s")
print(f"Test R2 score: {est.score(X_test, y_test):.2f}")
# %%
# We configured a pipeline to scale the numerical input features and tuned the
# neural network size and learning rate to get a reasonable compromise between
# training time and predictive performance on a test set.
#
# Importantly, this tabular dataset has very different dynamic ranges for its
# features. Neural networks tend to be very sensitive to features with varying
# scales and forgetting to preprocess the numeric feature would lead to a very
# poor model.
#
# It would be possible to get even higher predictive performance with a larger
# neural network but the training would also be significantly more expensive.
#
# Note that it is important to check that the model is accurate enough on a
# test set before plotting the partial dependence since there would be little
# use in explaining the impact of a given feature on the prediction function of
# a poor model.
#
# We will plot the partial dependence, both individual (ICE) and averaged one
# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.
import matplotlib.pyplot as plt
from sklearn.inspection import partial_dependence
from sklearn.inspection import plot_partial_dependence
print('Computing partial dependence plots...')
tic = time()
features = ['MedInc', 'AveOccup', 'HouseAge', 'AveRooms']
display = plot_partial_dependence(
est, X_train, features, kind="both", subsample=50,
n_jobs=3, grid_resolution=20, random_state=0
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
'Partial dependence of house value on non-location features\n'
'for the California housing dataset, with MLPRegressor'
)
display.figure_.subplots_adjust(hspace=0.3)
# %%
# Gradient boosting
# .................
#
# Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and
# compute the partial dependence on the same features.
from sklearn.ensemble import HistGradientBoostingRegressor
print("Training HistGradientBoostingRegressor...")
tic = time()
est = HistGradientBoostingRegressor()
est.fit(X_train, y_train)
print(f"done in {time() - tic:.3f}s")
print(f"Test R2 score: {est.score(X_test, y_test):.2f}")
# %%
# Here, we used the default hyperparameters for the gradient boosting model
# without any preprocessing as tree-based models are naturally robust to
# monotonic transformations of numerical features.
#
# Note that on this tabular dataset, Gradient Boosting Machines are both
# significantly faster to train and more accurate than neural networks. It is
# also significantly cheaper to tune their hyperparameters (the defaults tend
# to work well while this is not often the case for neural networks).
#
# We will plot the partial dependence, both individual (ICE) and averaged one
# (PDP). We limit to only 50 ICE curves to not overcrowd the plot.
print('Computing partial dependence plots...')
tic = time()
display = plot_partial_dependence(
est, X_train, features, kind="both", subsample=50,
n_jobs=3, grid_resolution=20, random_state=0
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
'Partial dependence of house value on non-location features\n'
'for the California housing dataset, with Gradient Boosting'
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)
# %%
# Analysis of the plots
# .....................
#
# We can clearly see on the PDPs (thick blue line) that the median house price
# shows a linear relationship with the median income (top left) and that the
# house price drops when the average occupants per household increases (top
# middle). The top right plot shows that the house age in a district does not
# have a strong influence on the (median) house price; neither does the
# average number of rooms per household.
#
# The ICE curves (light blue lines) complement the analysis: we can see that
# there are some exceptions, where the house price remains constant with median
# income and average occupants. On the other hand, while the house age (top
# right) does not have a strong influence on the median house price on average,
# there seems to be a number of exceptions where the house price increases for
# ages between 15 and 25. Similar exceptions can be observed for the average
# number of rooms (bottom left). Therefore, ICE plots show some individual
# effects which are attenuated by taking the averages.
#
# In all plots, the tick marks on the x-axis represent the deciles of the
# feature values in the training data.
#
# We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much
# smoother predictions than
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
#
# However, it is worth noting that we are creating potentially meaningless
# synthetic samples if features are correlated.
# %%
# 2D interaction plots
# --------------------
#
# PDPs with two features of interest enable us to visualize interactions among
# them. However, ICEs cannot be plotted in an easy manner and thus interpreted.
# Another consideration is linked to the performance of computing the PDPs. With
# the tree-based algorithm, when only PDPs are requested, they can be computed
# in an efficient way using the `'recursion'` method.
features = ['AveOccup', 'HouseAge', ('AveOccup', 'HouseAge')]
print('Computing partial dependence plots...')
tic = time()
_, ax = plt.subplots(ncols=3, figsize=(9, 4))
display = plot_partial_dependence(
est, X_train, features, kind='average', n_jobs=3, grid_resolution=20,
ax=ax,
)
print(f"done in {time() - tic:.3f}s")
display.figure_.suptitle(
'Partial dependence of house value on non-location features\n'
'for the California housing dataset, with Gradient Boosting'
)
display.figure_.subplots_adjust(wspace=0.4, hspace=0.3)
# %%
# The two-way partial dependence plot shows the dependence of median house
# price on joint values of house age and average occupants per household. We
# can clearly see an interaction between the two features: for an average
# occupancy greater than two, the house price is nearly independent of the
# house age, whereas for values less than two there is a strong dependence on
# age.
#
# 3D interaction plots
# --------------------
#
# Let's make the same partial dependence plot for the 2 features interaction,
# this time in 3 dimensions.
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
features = ('AveOccup', 'HouseAge')
pdp = partial_dependence(
est, X_train, features=features, kind='average', grid_resolution=20
)
XX, YY = np.meshgrid(pdp["values"][0], pdp["values"][1])
Z = pdp.average[0].T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1,
cmap=plt.cm.BuPu, edgecolor='k')
ax.set_xlabel(features[0])
ax.set_ylabel(features[1])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median\n'
'age and average occupancy, with Gradient Boosting')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
arahuja/scikit-learn | examples/model_selection/randomized_search.py | 57 | 3208 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
iris = load_digits()
X, y = iris.data, iris.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
yujiakimoto/lowrankautoml | automl/model.py | 1 | 4147 | import ML_algorithms as ml
import util
import numpy as np
from smac.configspace import ConfigurationSpace
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC
class Model:
def __init__(self, settings={'algorithm':None, 'hyperparameters':None}, num_bayesian_optimize=10, num_folds=10, verbose=True, train_features=None,
train_labels=None):
"""instantiates a model object given an algorithm type, hyperparameter settings and
a number of folds for cross validation"""
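        # Illustrative usage (a sketch; 'kNN' and its 'k' hyperparameter are the
        # names this class already refers to elsewhere):
        #   m = Model(settings={'algorithm': 'kNN', 'hyperparameters': {'k': 1}})
        #   m.fit(train_features, train_labels); predictions = m.predict(test_features)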
self.algorithm = settings['algorithm']
"""algorithm type, a string"""
self.hyperparameters = settings['hyperparameters']
"""hyperparameter settings, a dict (keys=hyperparameter name, values=hyperparameter values)"""
self.classifier = None
"""sklearn classifier associated with this model"""
self.num_folds = num_folds
"""the number of folds for k-fold cross validation"""
self.fitted = False
"""whether or not the model has been trained"""
self.bayesian_optimized = False
"""whether or not the model's hyperparameters have been tuned"""
self.num_bayesian_optimize = num_bayesian_optimize
"""number of Bayesian optimization rounds for each base learner"""
self.train_features = train_features
self.train_labels = train_labels
"""training dataset"""
self.error = None
"""k-fold cross validation error for a given dataset"""
self.cv_predictions = None
"""k-fold predictions for a given dataset"""
self.verbose = verbose
"""whether to generate print statements"""
def fit(self, train_features, train_labels):
"""fit the model to given training features and labels"""
self.train_features = train_features
self.train_labels = train_labels
self.error, self.cv_predictions, self.classifier = getattr(ml, self.algorithm)(self.train_features, self.train_labels, verbose=self.verbose, **self.hyperparameters)
self.fitted = True
return self
def predict(self, test_features):
"""return predictions of ensemble on newly provided test set"""
return self.classifier.predict(test_features)
def bayesian_optimize(self):
"""conduct Bayesian optimization on the hyperparameters, starting at current values"""
if self.algorithm in ['GNB','Perceptron']:
return self
else:
cs = ConfigurationSpace()
cs.add_hyperparameters(list(getattr(util, self.algorithm + '_range')(self.hyperparameters).values()))
#set runcount-limit in Bayesian optimization
if self.algorithm == 'kNN':
if self.hyperparameters['k'] == 1: num = 3
else: num = 5
else: num = self.num_bayesian_optimize
scenario = Scenario({'run_obj': 'quality', 'runcount-limit': num, 'cs': cs, 'deterministic': 'true', 'memory_limit': None})
smac = SMAC(scenario=scenario, rng=np.random.RandomState(100), tae_runner=self.error_function)
try:
incumbent = smac.optimize()
finally:
incumbent = smac.solver.incumbent
self.error = smac.get_tae_runner().run(incumbent, 1)[1]
self.hyperparameters = incumbent.get_dictionary()
self.bayesian_optimized = True
return self
def error_function(self, hyperparameters):
"""function on which to conduct Bayesian optimization"""
return getattr(ml, self.algorithm)(self.train_features, self.train_labels, num_splits=3, verbose=self.verbose, **hyperparameters)[0]
def add_training_data(self, train_features, train_labels):
self.train_features = train_features
self.train_labels = train_labels
| mit |
LuciusV/ORCA-pyPDOS | PlotPartialDOS.py | 1 | 2508 | from functions import Gaussian, dump
import numpy as np
import matplotlib.pyplot as plt
def PlotPartialDOS(args, fig, ax, Type, SpeciesToPlot, PlotParameters, Eigenvalues, Occupations, ContributionMatrix, **kwargs):
Domain = PlotParameters['Domain']
MoInEnergyRange = PlotParameters['MoInEnergyRange']
MoInEnergyRangeB = PlotParameters['MoInEnergyRangeB']
for key in kwargs:
if kwargs[key] is not None and key == 'EigenvaluesB':
EigenvaluesB = kwargs[key]
elif kwargs[key] is not None and key == 'OccupationsB':
OccupationsB = kwargs[key]
elif kwargs[key] is not None and key == 'ContributionMatrixB':
ContributionMatrixB = kwargs[key]
SumOfselected = np.zeros_like(Domain)
if Type == 'UKS':
SumOfselectedB = np.zeros_like(Domain)
for key in SpeciesToPlot:
AtomDos = np.zeros_like(Domain)
for index in MoInEnergyRange:
norm = np.sum(ContributionMatrix[index,:]**2)
if Type == 'RKS':
norm *= 0.5
weight = np.sum(ContributionMatrix[index,range(SpeciesToPlot[key][0], SpeciesToPlot[key][1]+1)]**2) / norm
AtomDos += Gaussian(Eigenvalues[index],args.smear, weight, Domain)
if Type == 'UKS':
AtomDosB = np.zeros_like(Domain)
for index in MoInEnergyRangeB:
norm = np.sum(ContributionMatrixB[index, :] ** 2)
weight = np.sum(ContributionMatrixB[index, range(SpeciesToPlot[key][0], SpeciesToPlot[key][1] + 1)] ** 2) / norm
AtomDosB -= Gaussian(EigenvaluesB[index], args.smear, weight, Domain)
postfix = '' if Type == 'RKS' else ' up'
SumOfselected += AtomDos
if args.unique:
dump(args, '_pdos_' + key + postfix.replace(' ','_'), Domain, AtomDos)
plt.plot(Domain, AtomDos, label=key+postfix)
if Type == 'UKS':
SumOfselectedB += AtomDosB
if args.unique:
dump(args,'_pdos_'+key+'_down.txt',Domain, AtomDosB)
plt.plot(Domain, AtomDosB, label=key+' down')
plt.plot(Domain, SumOfselected, lw=2, label='full of sel. atoms' + postfix)
if Type == 'UKS':
postfix = postfix.replace(' ','_')
dump(args, '_pdos'+postfix+'.txt',Domain, SumOfselected)
if Type == 'UKS':
plt.plot(Domain, SumOfselectedB, lw=2, label='full of sel. atoms down')
if args.dump:
dump(args, '_pdos_down.txt',Domain,SumOfselectedB) | gpl-3.0 |
xfaxca/pymlkit | pymlkit/stats/metrics.py | 1 | 1069 | """
Module for some simple statistical metrics.
"""
import numpy as np
from sklearn.metrics import r2_score, median_absolute_error
from sklearn.metrics import mean_squared_error, mean_squared_log_error, mean_absolute_error
__all__ = [
'median_abs_pc_err',
'mean_abs_pc_err',
'abs_pc_err',
'r2_score',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error'
]
def median_abs_pc_err(y, y_pred):
"""
    Calculates the median absolute percentage error between two
numpy arrays.
:param y: (np.ndarray) - True values
:param y_pred: (np.ndarray) - Predicted values
"""
return np.median(abs_pc_err(y, y_pred))
def mean_abs_pc_err(y, y_pred):
"""
    Calculates the mean absolute percentage error between two
numpy arrays.
:param y: (np.ndarray)
:param y_pred: (np.ndarray)
"""
return np.mean(abs_pc_err(y, y_pred))
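# Illustrative example (hypothetical numbers): for y = [100, 200, 400] and
# y_pred = [110, 180, 500], abs_pc_err gives [10., 10., 25.], so
# mean_abs_pc_err returns 15.0 and median_abs_pc_err returns 10.0.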
def abs_pc_err(y, y_pred):
"""
Calculates absolute percentage error
"""
return (abs(y - y_pred) / y) * 100.0
| gpl-3.0 |
theodoregoetz/clas12-dc-wiremap | clas12_wiremap/ui/wire_supply_board_status.py | 1 | 6271 | import numpy as np
from numpy import random as rand
from clas12_wiremap.ui import QtGui
from matplotlib import pyplot, gridspec, cm
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg \
as FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT \
as NavigationToolbar
from matplotlib.figure import Figure
from component_array import *
class WireMap(QtGui.QWidget):
def __init__(self, parent=None):
super(WireMap,self).__init__(parent)
self.parent = parent
self.setup_widgets()
self.vbox = QtGui.QVBoxLayout(self)
self.vbox.addWidget(self.canvas)
self.vbox.addWidget(self.toolbar)
def setup_widgets(self):
# setting up dimensions
self.fig = Figure((5.0, 4.0), dpi=100)
#attaching the figure to the canvas
self.canvas = FigureCanvas(self.fig)
#attaching a toolbar to the canvas
self.toolbar = NavigationToolbar(self.canvas, self.parent)
self.axs = [[] for i in range(6)]
self.pts = [[None]*6 for i in range(6)]
sector_grid = gridspec.GridSpec(2,3,wspace=0.3,hspace=0.2)
for sec in range(6):
slyr_grid = gridspec.GridSpecFromSubplotSpec(6,1,
wspace=0.0,hspace=0.1,
subplot_spec=sector_grid[sec])
for slyr in range(6):
self.axs[sec].append(
self.fig.add_subplot(slyr_grid[5-slyr]))
def update_plots(self):
for sec in range(6):
for slyr in range(6):
self.pts[sec][slyr] = \
self.superlayer_plot(self.axs[sec][slyr],sec,slyr)
self.canvas.draw()
def superlayer_plot(self,ax,sec,slyr):
if not hasattr(self,'data'):
self.data = fetchCrateArray(session)
pt = ax.imshow(self.data[sec][slyr],
origin='lower',
aspect='auto',
interpolation='nearest',
extent=[0.5,112.5,-0.5,5.5],
vmin=0,
cmap=cm.ocean)
if slyr == 5:
ax.set_title('Sector '+str(sec+1))
if (sec > 2) and (slyr == 0):
ax.xaxis.set_ticks([1]+list(range(32,113,32)))
ax.xaxis.set_ticklabels([1]+list(range(32,113,32)))
else:
ax.xaxis.set_major_locator(pyplot.NullLocator())
ax.set_ylabel(str(slyr+1))
ax.yaxis.set_major_locator(pyplot.NullLocator())
ax.hold(False)
return pt
class WireMapSector(QtGui.QWidget):
def __init__(self, sector, parent=None):
super(WireMapSector,self).__init__(parent)
self.sector = sector
self.parent = parent
self.setup_widgets()
self.vbox = QtGui.QVBoxLayout(self)
self.vbox.addWidget(self.canvas)
self.vbox.addWidget(self.toolbar)
def setup_widgets(self):
self.fig = Figure((5.0, 4.0), dpi=100)
self.canvas = FigureCanvas(self.fig)
self.toolbar = NavigationToolbar(self.canvas, self.parent)
self.axs = []
self.pts = [None]*6
slyr_grid = gridspec.GridSpec(6,1,wspace=0.0,hspace=0.1)
for slyr in range(6):
self.axs.append(
self.fig.add_subplot(slyr_grid[5-slyr]))
def update_plots(self):
for slyr in range(6):
self.pts[slyr] = \
self.superlayer_plot(self.axs[slyr],slyr)
self.canvas.draw()
def superlayer_plot(self,ax,slyr):
if not hasattr(self,'data'):
self.data = fetchCrateArray(session)
pt = ax.imshow(self.data[slyr],
origin='lower',
aspect='auto',
interpolation='nearest',
extent=[0.5,112.5,-0.5,5.5],
vmin=0,
cmap=cm.ocean)
if slyr == 5:
ax.set_title('Sector '+str(self.sector+1))
if slyr == 0:
ax.xaxis.set_ticks([1]+list(range(32,113,32)))
ax.xaxis.set_ticklabels([1]+list(range(32,113,32)))
else:
ax.xaxis.set_major_locator(pyplot.NullLocator())
ax.set_ylabel(str(slyr+1))
ax.yaxis.set_major_locator(pyplot.NullLocator())
ax.hold(False)
return pt
class WireMaps(QtGui.QStackedWidget):
def __init__(self, parent=None):
super(WireMaps,self).__init__(parent)
self.wiremap = WireMap(self)
self.addWidget(self.wiremap)
self.sec_wiremaps = []
for sec in range(6):
self.sec_wiremaps.append(WireMapSector(sec,self))
self.addWidget(self.sec_wiremaps[sec])
self.data = fetchCrateArray(session)
@property
def data(self):
return self.wiremap.data
@data.setter
def data(self,d):
self._data = d
self.wiremap.data = self._data
for sec in range(6):
self.sec_wiremaps[sec].data = self._data[sec]
self.update_active_plots()
def update_active_plots(self):
if super(WireMaps,self).currentIndex() == 0:
self.wiremap.update_plots()
else:
sec = super(WireMaps,self).currentIndex() - 1
self.sec_wiremaps[sec].update_plots()
def setCurrentIndex(self,*args,**kwargs):
super(WireMaps,self).setCurrentIndex(*args,**kwargs)
self.update_active_plots()
if __name__ == '__main__':
import sys
from matplotlib import pyplot
session = initialize_session()
dc_fill_tables(session)
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
wid = QtGui.QWidget()
vbox = QtGui.QVBoxLayout()
wid.setLayout(vbox)
cbox = QtGui.QSpinBox()
cbox.setMinimum(0)
cbox.setMaximum(6)
cbox.setSpecialValueText('-')
stack = WireMaps()
stack.data = fetchSupplyBoardArray(session)
#change this line to fetch different component
vbox.addWidget(cbox)
vbox.addWidget(stack)
self.setCentralWidget(wid)
cbox.valueChanged.connect(stack.setCurrentIndex)
self.show()
app = QtGui.QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/units/bar_demo2.py | 9 | 1062 | """
plot using a variety of cm vs inches conversions. The example shows
how default unit introspection works (ax1), how various keywords can
be used to set the x and y units to override the defaults (ax2, ax3,
ax4) and how one can set the xlimits using scalars (ax3, current units
assumed) or units (conversions applied to get the numbers to current
units)
"""
import numpy as np
from basic_units import cm, inch
import matplotlib.pyplot as plt
cms = cm *np.arange(0, 10, 2)
bottom=0*cm
width=0.8*cm
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax1.bar(cms, cms, bottom=bottom)
ax2 = fig.add_subplot(2,2,2)
ax2.bar(cms, cms, bottom=bottom, width=width, xunits=cm, yunits=inch)
ax3 = fig.add_subplot(2,2,3)
ax3.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=cm)
ax3.set_xlim(2, 6) # scalars are interpreted in current units
ax4 = fig.add_subplot(2,2,4)
ax4.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=inch)
#fig.savefig('simple_conversion_plot.png')
ax4.set_xlim(2*cm, 6*cm) # cm are converted to inches
plt.show()
| mit |
everilae/sqli | tests/test_sqli.py | 1 | 7735 | import pytest
from sqli import check
from textwrap import dedent
ok = [
(0, """
import sqlite3
with sqlite3.connect("Quiz.db") as db:
cursor = db.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS user(
userID INTEGER PRIMARY KEY
username VARCHAR(20) NOT NULL,
firstname VARCHAR(20) NOT NULL,
surname VARCHAR(20) NOT NULL,
password VARCHAR(20) NOT NULL,);
''')
"""),
(0, '''
def search(x,y):
c.execute("""SELECT * FROM Table
INNER JOIN Genres ON Table.GenreID = Genres.GenreID
WHERE ?=?""",(x,y))
rows = c.fetchall
for row in rows:
print(row)
search("Genres.Genre", input("Genre: "))
'''),
(0, """
import sqlite3
import os
from werkzeug.security import generate_password_hash
from uuid import uuid4
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB_PATH = os.path.join(BASE_DIR, "chat.sqlite")
def insert_row_in_db(username, firstname, lastname, email, password):
\"\"\" Creates a row in chat.sqlite's users table \"\"\"
uid = uuid4().hex
pwd_hash = generate_password_hash(password)
login_time = set_lastlogin(uid)
row_data = (uid, username, firstname, lastname, email, pwd_hash, login_time, True)
with sqlite3.connect(DB_PATH) as conn:
c = conn.cursor()
c.execute('''INSERT INTO users (uid, username, firstname, lastname, email, passwordhash,
lastlogin, loggedin) VALUES (?, ?, ?, ?, ?, ?, ?, ?);''', row_data)
"""),
(0, """
with open(poll, encoding='utf-8') as p:
f_csv = csv.reader(p)
for row in f_csv:
c.execute(u'SELECT id FROM senators WHERE name LIKE ?', ('%'+row[0]+'%',))
data = c.fetchone()
print(data) # I should not get None results here, but I do, exactly when the query has UTF-8 characters.
"""),
(0, """
if count == 1:
cursor.execute("SELECT * FROM PacketManager WHERE ? = ?", filters[0], parameters[0])
all_rows = cursor.fetchall()
elif count == 2:
cursor.execute("SELECT * FROM PacketManager WHERE ? = ? AND ? = ?", filters[0], parameters[0], filters[1], parameters[1])
all_rows = cursor.fetchall()
elif count == 3 :
cursor.execute("SELECT * FROM PacketManager WHERE ? = ? AND ? = ? AND ? = ?", filters[0], parameters[0], filters[1], parameters[1], filters[2], parameters[2])
all_rows = cursor.fetchall()
"""),
(0, """stmt = f"SELECT * FROM foo" """),
(0, """cur.execute(
sql.SQL(intersecta)
.format(nome0=sql.Identifier(nomecompleto),
nome1=sql.Identifier(tabelagerada),
nome2=sql.Identifier(segmentnome)),
[nomedosegmento,])
"""),
(0, """
stmt = "SELECT * FROM foo"
stmt += " WHERE x = ?"
engine.execute(stmt, (x,))
"""),
(0, """engine.execute(f"SELECT 1")"""),
(0, """
grey_area = "1"
engine.execute(f"SELECT {grey_area}")
"""),
(0, """
foo = "SELECT * FROM foo WHERE bar = {}"
cur.execute(foo.format(1))
"""),
(0, """
foo = "SELECT * FROM {}"
cur.execute(foo.format("{}").format("foo"))
"""),
]
injectable = [
(3, """
import sqlite3 as lite
db = lite.connect("test.db")
id_to_be_added = "123456789101112"
db.execute("CREATE TABLE USERS (ID TEXT, NUM INT)")
Query = "{ SOMETHING IN SQL }" # This returns either True or False
if Query:
db.execute("UPDATE USERS SET NUM = NUM + 1 WHERE ID = {};".format(id_to_be_added))
else:
db.execute("INSERT INTO USERS ({}, 0)".format(id_to_be_added))
num_to_be_printed = db.execute("SELECT NUM FROM USERS WHERE ID = {}".format(id_to_be_added))
print("{0} has {1}").format(id_to_be_added, num_to_be_printed)
"""),
(1, """
query = "delete from zoznam where name = '%s' " % data3
c.execute(query)
conn.commit()
"""),
(1, """mydata = c.execute("DELETE FROM Zoznam WHERE Name='%s'" % data3)"""),
(1, """
conn = sqlite3.connect('databaza.db')
c = conn.cursor()
conn.text_factory = str
data3 = str(input('Please enter name: '))
query = "DELETE FROM Zoznam WHERE Name = '%s';" % data3.strip()
print(query)
mydata = c.execute(query)
"""),
(1, """insert_sql = 'INSERT INTO pcp(date,stations,pcp) VALUES( ' + kdm + ',' + station + ',' + klm + ')'
c.execute(insert_sql)"""),
(1, """statement = u"select * from senators where name like '" + '%'+row[0]+'%'+"'"
c.execute(statement)"""),
(1, """
stmt = f"SELECT * FROM foo WHERE bar = {bar}"
c.execute(stmt)
"""),
(1, """cur.execute("SELECT * FROM foo WHERE x = {x}".format_map(bar))"""),
(1, """stmt = text("SELECT * FROM foo WHERE x = '" + str(x) + "'")
engine.execute(stmt)"""),
(1, """Person.objects.raw('SELECT last_name, birth_date, first_name, id FROM myapp_person WHERE last_name LIKE \\'%' + search + '%\\'')"""),
(1, """
stmt = "SELECT * FROM foo"
stmt += "WHERE x = " + x
engine.execute(stmt)
"""),
(1, """
sql = "SELECT * FROM foo "+\\
"WHERE baz = " + baz +\\
" AND bat = " + bat
cur.execute(sql)
"""),
(1, """
cur.execute("SELECT * FROM foo WHERE x = {}".format(x()))
"""),
(1, """
cur.execute("SELECT * FROM foo WHERE x = {}".format(y.x()))
"""),
(1, """
pd.read_sql_query(
'SELECT * FROM foo WHERE bar in ({0})'.format(
','.join(f"'{d}'" for d in data)))
"""),
]
dynamic_placeholders = [
(0, '''
query = """SELECT *
FROM [dbo].[viewA]
WHERE [Id] IN ({0})"""
query = query.format(",".join("?" * len(idList)))
data = pandas.read_sql_query(query, conn, params = tuple(idList))
'''),
(0, """
sql_query = "DELETE FROM multiusingfuncs WHERE muf_id IN (%s) " % ','.join(['?'] * len(list))
cursor.execute(sql_query, list)
"""),
(0, """
query = 'select * from foo where x in {}'
query = query.format(','.join('?' * (int(len(columns)))))
print(query) #for debug purposes
cursor = connection.cursor()
for data in reader:
cursor.execute(query, data)
"""),
(0, """
args = input().split()
placeholders = ",".join("?" * len(args))
stmt = f"SELECT * FROM foo WHERE x IN ({placeholders})"
engine.execute(stmt, args)
"""),
(0, """
pd.read_sql_query('SELECT * FROM foo WHERE bar in ({0})'.format(', '.join(('?' for _ in data))), data, conn)
"""),
(0, """
pd.read_sql_query('SELECT * FROM foo WHERE bar in ({0})'.format(', '.join(['?' for _ in data])), data, conn)
"""),
]
@pytest.mark.parametrize(
"expected,source", ok + injectable + dynamic_placeholders)
def test_check(expected, source):
poisoned = check(dedent(source))
assert len(poisoned) == expected
#def test_py_2():
# source = dedent("""\
# sql = "SELECT * FROM foo WHERE x = " + x
# print sql
# cur.execute(sql)""")
# poisoned = check(source)
# assert len(poisoned) == 1
| mit |
deot95/Tesis | Proyecto de Grado Ingeniería Electrónica/Workspace/Comparison/Full Linear/ddpg_small.py | 1 | 10664 | import linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import numpy.matlib
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
return theta * (mu - x) + sigma * np.random.randn(np.shape(x)[0])
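# The helper above is one step of an Ornstein-Uhlenbeck process: it returns the
# increment theta*(mu - x) + sigma*N(0, 1), which the training loop below scales
# by a decaying epsilon and adds to the actor's action for exploration.
# Illustrative sketch only (not part of the original script); `action` is
# assumed to be a 1-D numpy array and the constants mirror those in simulate().
def ou_perturb_example(action, epsilon=1.0):
    noise = max(epsilon, 0) * ou(action, 0.5, 1, 1.5)
    return action + noise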
def simulate(control, swmm ,flows):
best_reward = -1*np.inf
BUFFER_SIZE = 100000
BATCH_SIZE = 120
GAMMA = 0.99
TAU = 0.01 #Target Network HyperParameters
LRA = 0.0001 #Learning rate for Actor
    LRC = 0.001 #Learning rate for Critic
action_dim = 10
state_dim = 8
max_steps = 6000
np.random.seed(9501)
EXPLORE = 100000.
episode_count = 1000
done = False
step = 0
epsilon = 1
if swmm:
print("No support")
else:
# Constants for the linear environment
Hs = 1800
A1 = 0.0020
mu1 = 250
sigma1 = 70
A2 = 0.0048
mu2 = 250
sigma2 = 70
dt = 1
x = np.arange(Hs)
d = np.zeros((2,Hs))
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
A1 += 0.0004*np.random.rand()
mu1 += 50*np.random.rand()
sigma1 += 14*np.random.rand()
A2 += 0.00096*np.random.rand()
mu2 += 50*np.random.rand()
sigma2 += 14*np.random.rand()
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = a_t_original[0] + noise_t[0]
#Act over the system and get info of the next states
s_t1 , r_t, done, _ = env.step(a_t[0],flows=flows)
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_small_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors_small/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics_small/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors_small/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics_small/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
resv, resf, resu = env.free_sim()
font_labels = 16
font_legends = 22
ticksize = 16
width = 2.5
f , axarr = plt.subplots(nrows=1, ncols=2,figsize=(14,6),sharex=True )
resv_norm = np.divide(np.transpose(resv),np.matlib.repmat(env.vmax,Hs,1))
resu = np.transpose(np.asarray(resu))
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:5],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 5",fontsize=font_labels)
axarr[0].set_xlabel("Times(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,5:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vT",range(5,10))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tank 6 to 9 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Times(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
'''
lines = axarr[1,0].plot(x,resu[:,:2],linewidth=width)
axarr[1,0].legend(lines , list(map(lambda x: "u"+str(x+1),range(2))),prop ={'size':font_legends})
axarr[1,0].set_title("Control Actions - Valves 1 and 2",fontsize=font_labels)
axarr[1,0].set_xlabel("Times(s)",fontsize=font_labels)
axarr[1,0].set_ylabel("% Aperture",fontsize=font_labels)
axarr[1,0].tick_params(labelsize=ticksize)
lines = axarr[1,1].plot(x,resu[:,2:],linewidth=width)
axarr[1,1].legend(lines , list(map(lambda x: "u"+str(x+1),range(2,4))),prop ={'size':font_legends})
axarr[1,1].set_title("Control Actions - Valves 3 and 4",fontsize=font_labels)
axarr[1,1].set_xlabel("Times(s)",fontsize=font_labels)
#axarr[1,1].set_ylabel("% Aperture",fontsize=font_labels)
axarr[1,1].tick_params(labelsize=ticksize)
#sns.despine()
'''
def rainfile():
from math import exp
import numpy as np
from matplotlib import pylab as plt
#Gaussian Extension
A1 = 0.0063 ; mu1 = 500; sigma1 = 150
A2 = 0.018; mu2 = 550; sigma2 = 150
dt = 1
Hs = 1800
x = np.arange(0,Hs,dt)
d = [[],[]]
# dconst = 0.5*mpc_obj.k1*mpc_obj.vmax(1);
d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2)) # Node 1 - left
d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2)) # Node 2 - right
def secs_to_hour(secs_convert):
hour = secs_convert//3600
mins = (secs_convert%3600)//60
secs = secs_convert%60
return '{h:02d}:{m:02d}'.format(h=hour,m=mins)
secs_hour_vec = np.vectorize(secs_to_hour)
for k in (1,2):
with open('swmm/runoff%d.dat' % k, 'w') as f:
i = 0
for (t,val) in zip(secs_hour_vec(x), d[k-1]):
if i%60 == 0:
f.write(t+" "+str(val)+"\n")
i += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c","--control", type=int, choices = [0,1], help = "Choose between control(1) or free dynamics(0)")
parser.add_argument("-s","--swmm", type=int, choices = [0,1], help = "Choose between a simulation with swmm(1) or not(0)")
parser.add_argument("-f","--flow", type=int, choices = [0,1], help = "Choose between a simulation with flows(1) or not(0)")
args = parser.parse_args()
if args.flow == 1 and args.swmm == 1:
print("Conflicting option flow 1 and swmm 1")
else:
t0 = time.process_time()
simulate(control=args.control, swmm=args.swmm, flows = args.flow)
tf = time.process_time()
print("Elapsed time: ",tf-t0) | mit |
botlabio/autonomio | autonomio/plots/paramgrid.py | 1 | 1688 | import matplotlib
matplotlib.use('Agg')  # this needs to be here exceptionally (before pyplot is imported)
import matplotlib.pyplot as plt
plt.style.use('bmh')
def paramgrid(data, col, col_count=4):
'''
USE: paramgrid(df,'loss')
This plot is especially useful for investigating
more than a few values at one go. For example if you
want to see 16 different layer settings in one figure.
data = pandas dataframe from hyperscan()
col = the parameter column
col_count = how many columns per row
'''
uniques = len(data[col].unique())
    if uniques == 1:
print ("ERROR: column has only one unique item")
if uniques < 4:
col_count = uniques
row_modulus = uniques % col_count
    row_count = uniques // col_count
if row_modulus is not 0:
row_count = row_count + 1
fig, axs = plt.subplots(row_count,
col_count,
figsize=(16, row_count*4),
sharex=True,
sharey=True)
axs = axs.ravel()
for i in range(uniques):
item = data[col].unique()[i]
temp = data[data[col] == item]
axs[i].scatter(temp.train_loss, temp.train_acc, label='train')
axs[i].scatter(temp.test_loss, temp.test_acc, label='test')
axs[i].set_title(item)
axs[i].tick_params(axis='both', which='major', pad=15)
for i in range(uniques, row_count*col_count):
fig.delaxes(axs[i])
fig.text(0.07,
0.5,
'x(loss) y(accuracy)',
ha='center',
va='center',
rotation='vertical',
size=17,
color='grey')
| mit |
loli/semisupervisedforests | sklearn/metrics/scorer.py | 11 | 12934 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
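# Illustrative sketch (not part of scikit-learn): resolving a scoring name with
# get_scorer and applying the resulting callable with the standard
# scorer(estimator, X, y) signature described in the module docstring.
# `estimator` is assumed to be an already fitted estimator.
def _example_resolve_and_score(estimator, X, y, scoring='accuracy'):
    scorer = get_scorer(scoring)
    return scorer(estimator, X, y)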
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
        raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
costypetrisor/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
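# Illustrative sketch (not part of the original benchmark): timing a single
# sampler through bench_sample(). The lambda below is the same numpy-permutation
# strategy that is registered in the __main__ block further down.
def example_single_benchmark(n_population=100000, n_samples=1000):
    sampler = lambda n_pop, n_sample: np.random.permutation(n_pop)[:n_sample]
    return bench_sample(sampler, n_population, n_samples)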
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
                  help="Benchmark results are averaged over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
                  help="Comma-separated list of sampling algorithms to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
Ultra-Seven/newStream | src/misc/plotmouse.py | 1 | 4354 | from sklearn.cluster import MiniBatchKMeans
import numpy as np
from collections import *
from pygg import *
from wuutils import *
from math import cos, sin, radians
import random
import bsddb3
import click
import json
random.seed(0)
def zeroit(xs):
if not xs: return xs
x0 = xs[0]
return [x-x0 for x in xs]
def split(trace, cutoff=2000):
"""
If there is a pause > cutoff between two points in a trace, split it.
"""
ts = trace['ts']
si, ei = 0, 0
pt = ts[0]
for ei, t in enumerate(ts):
if t > pt + cutoff:
print si, ei
yield dict(
xs=trace['xs'][si:ei],
ys=trace['ys'][si:ei],
ts=trace['ts'][si:ei],
actions=trace['actions'][si:ei]
)
si = ei
pt = t
    yield dict(
        xs=trace['xs'][si:],
        ys=trace['ys'][si:],
        ts=trace['ts'][si:],
        actions=trace['actions'][si:]
    )
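# Illustrative sketch (not part of the original script): split() yields one
# sub-trace per pause longer than `cutoff` (in the units of the recorded
# timestamps); the keys mirror the per-trace dicts built in main() from the
# bsddb3 log.
def _split_demo():
    trace = dict(xs=[0, 1, 2, 3], ys=[0, 0, 1, 1],
                 ts=[0, 100, 5000, 5100], actions=['d', 'm', 'm', 'u'])
    # the 100 -> 5000 gap exceeds the default cutoff, so two pieces come back
    return list(split(trace))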
def rotate_trace(trace):
"""
trace is a list of dicts
normalize the magnitude of the traces to be the same,
and (try) to rotate them into the same direction
"""
xs = [d['x'] for d in trace]
ys = [d['y'] for d in trace]
pts = zip(xs, ys)
pts = map(np.array, pts)
# find max norm and normalize trace
maxnorm, maxpt = max([(np.linalg.norm(pt), pt) for pt in pts], key=lambda p: p[0])
if maxnorm == 0:
return trace
pts = [pt / maxnorm for pt in pts]
maxpt = maxpt / maxnorm
angle = np.arccos(np.clip(np.dot(maxpt, [1., 0]), -1.0, 1.0))
#angle = -angle
#theta = radians(angle)
theta = angle
print maxpt, angle, theta
rotate = lambda (x,y): ((x*cos(theta) + y*sin(theta)),
(x*sin(theta) + y*cos(theta)))
pts = map(rotate, pts)
for d, (x,y) in zip(trace, pts):
d['x'] = x
d['y'] = y
return trace
if 0:
traces = [
[
dict(x=0, y=0),
dict(x=0, y=1),
dict(x=0, y=.5)
],
[
dict(x=0, y=0),
dict(x=1, y=0)
]
]
rotated = map(rotate_trace, traces)
for trace in rotated:
for d in trace:
print d['x'], d['y']
print
exit()
def cluster_traces(traces, n_clusters=30):
"""
traces is: [ [dict, dict,... ], ... ]
"""
def trace_to_vect(trace):
vect = []
for d in trace:
vect.append(d['x'])
vect.append(d['y'])
#vect.append(d['t'])
return vect
trace_vects = map(trace_to_vect, traces)
maxlen = max(map(len, trace_vects))
for trace in trace_vects:
if len(trace) < maxlen:
trace.extend([-1] * (maxlen - len(trace)))
trace_vects = np.array(trace_vects)
k_means = MiniBatchKMeans(
init='k-means++',
init_size=1000, batch_size=1000, n_clusters=n_clusters, n_init=10)
k_means.fit(trace_vects)
for trace, label in zip(traces, k_means.labels_):
for d in trace:
d['g'] = label
return traces
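# Illustrative sketch (not part of the original script): cluster_traces()
# expects a list of traces, each a list of point dicts with 'x' and 'y'; it
# pads the flattened (x, y) vectors to a common length, fits MiniBatchKMeans
# and writes the cluster label back into every point under 'g'.
def _cluster_demo(traces, n_clusters=5):
    rotated = [rotate_trace(t) for t in traces]  # normalise/rotate first, as main() does
    return cluster_traces(rotated, n_clusters=n_clusters)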
@click.command()
@click.argument("fname")
def main(fname):
"""
If you want to run this you will need to install R+ggplot2, as well as the following python packages
pip install pygg wuutils scikit-learn
"""
db = bsddb3.hashopen(fname)
keys = ['g', 's', 'x', 'y', 't', 'a']
traces = []
i = 0
db = [(key, db[key]) for key in db.keys()]
random.shuffle(db)
for key, log in db:
try:
log = json.loads(log)
if 'xs' not in log: continue
except:
continue
if 'd' not in log['actions'] or 'u' not in log['actions']: continue
try:
for d in split(log):
if len(d['xs']) < 5: continue
g=[i%20] * len(d['xs'])
s=[i] * len(d['xs'])
x=zeroit(d['xs'])
y=zeroit(d['ys'])
t=zeroit(d['ts'])
a=d['actions']
traces.append([dict(zip(keys, l)) for l in zip(g,s,x,y,t,a)])
i += 1
except:
print d.keys()
print d
exit()
if i >= 200: break
traces = map(rotate_trace, traces)
traces = cluster_traces(traces, 20)
data = []
map(data.extend, traces)
p = ggplot(data, aes(x='x', y='y', group='s', color='s'))
p += facet_wrap("~g", ncol=5)
p += geom_line(alpha=0.9)
p += legend_none
ggsave("plot.png", p, width=12, height=10, scale=1.2)
data = [dict(x=length, y=count) for length, count in Counter(map(len, traces)).items() ]
p = ggplot(data, aes(x='x', y='y'))
p += geom_point()
ggsave("plot_lengths.png", p, width=6, height=3, scale=1.2)
if __name__ == '__main__':
main() | mit |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/MHDgenerator/NS.py | 1 | 10327 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
import mshr
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
import MHDprec as MHDpreconditioner
import memory_profiler
import gc
import MHDmulti
import MHDmatrixSetup as MHDsetup
import Lshaped
#@profile
m = 8
set_log_active(False)
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple = Lshaped.SolutionSetUp()
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = -1
mesh, boundaries, domains = Lshaped.Domain(nn)
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
W = MixedFunctionSpace([Velocity, Pressure])
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim()]
def boundary(x, on_boundary):
return on_boundary
FSpaces = [Velocity,Pressure]
kappa = 1.0
Mu_m =10.0
MU = 1.0
N = FacetNormal(mesh)
ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
# g = inner(p0*N - MU*grad(u0)*N,v)*dx
IterType = 'Full'
Split = "No"
Saddle = "No"
Stokes = "No"
SetupType = 'python-class'
params = [kappa,Mu_m,MU]
u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, gradu0, Neumann, p0vec, bNone = Lshaped.SolutionMeshSetup(mesh, params, uu0, ub0, pu0, pb0, bu0, bb0, ru0, Laplacian, Advection, gradPres, CurlCurl, gradR, NS_Couple, M_Couple)
F_M = Expression(("0.0","0.0"))
F_S = Expression(("0.0","0.0"))
n = FacetNormal(mesh)
u_k, p_k = Lshaped.Stokes(Velocity, Pressure, F_S, u0, p0, Neumann, params, boundaries, domains)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
a11 = params[2]*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1./2)*div(u_k)*inner(u,v)*dx - (1./2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
a = a11 + a21 + a12
Lns = inner(v, F_NS)*dx #+ inner(Neumann,v)*ds(2)
a11 = params[2]*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k),v)*dx + (1./2)*div(u_k)*inner(u_k,v)*dx - (1./2)*inner(u_k,n)*inner(u_k,v)*ds
a12 = -div(v)*p_k*dx
a21 = -div(u_k)*q*dx
L = Lns - ( a11 + a21 + a12 )
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x= np.concatenate((u_k.vector().array(),p_k.vector().array()), axis=0)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
parameters['linear_algebra_backend'] = 'uBLAS'
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 10 # max no of iterations allowed
SolutionTime = 0
outer = 0
u_is = PETSc.IS().createGeneral(range(Velocity.dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-5
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcs = [bcu]
A, b = assemble_system(a, L, bcs)
A, b = CP.Assemble(A,b)
u = b.duplicate()
n = FacetNormal(mesh)
b_t = TrialFunction(Velocity)
c_t = TestFunction(Velocity)
aa = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1./2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1./2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())
ShiftedMass = assemble(aa)
bcu.apply(ShiftedMass)
ShiftedMass = CP.Assemble(ShiftedMass)
kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass)
stime = time.time()
u, mits,nsits = S.solve(A,b,u,params,W,'Directss',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF)
Soltime = time.time()- stime
MO.StrTimePrint("MHD solve, time: ", Soltime)
Mits += mits
NSits += nsits
SolutionTime += Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()]
u0, p0, b0, r0, F_NS, F_M, F_MX, F_S, gradu0, Neumann, p0vec = Lshaped.Solution2(mesh, params)
ExactSolution = [u0,p0,b0,r0]
Vdim = dim[0]
Pdim = dim[1]
Mdim = dim[2]
Rdim = dim[3]
# k +=2
VelocityE = VectorFunctionSpace(mesh,"CG",4)
u = interpolate(ExactSolution[0],VelocityE)
    PressureE = FunctionSpace(mesh,"CG",3)
# parameters["form_compiler"]["quadrature_degree"] = 8
    X = x.array()
xu = X[0:Vdim]
ua = Function(FSpaces[0])
ua.vector()[:] = xu
pp = X[Vdim:Vdim+Pdim]
pa = Function(FSpaces[1])
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(FSpaces[1])
ones.vector()[:]=(0*pp+1)
pp = Function(FSpaces[1])
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(ExactSolution[1],PressureE)
pe = Function(PressureE)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
ErrorU = Function(FSpaces[0])
ErrorP = Function(FSpaces[1])
ErrorU = u-ua
ErrorP = pe-pp
tic()
errL2u= sqrt(abs(assemble(inner(ErrorU, ErrorU)*dx)))
MO.StrTimePrint("Velocity L2 error, time: ", toc())
tic()
errH1u= sqrt(abs(assemble(inner(grad(ErrorU), grad(ErrorU))*dx)))
MO.StrTimePrint("Velocity H1 error, time: ", toc())
tic()
errL2p= sqrt(abs(assemble(inner(ErrorP, ErrorP)*dx)))
MO.StrTimePrint("Pressure L2 error, time: ", toc())
print float(Wdim[xx-1][0])/Wdim[xx-2][0]
if xx > 1:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])/np.log2(sqrt(float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])/np.log2(sqrt(float(Velocitydim[xx-1][0])/Velocitydim[xx-2][0])))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])/np.log2(sqrt(float(Pressuredim[xx-1][0])/Pressuredim[xx-2][0])))
import pandas as pd
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
import pandas as pd
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable
interactive()
| mit |
Y-oHr-N/kenchi | kenchi/plotting.py | 1 | 12025 | import numpy as np
from scipy.stats import gaussian_kde
from sklearn.metrics import auc, roc_curve
from sklearn.utils.validation import check_array, check_symmetric, column_or_1d
__all__ = [
'plot_anomaly_score', 'plot_graphical_model',
'plot_partial_corrcoef', 'plot_roc_curve'
]
def plot_anomaly_score(
anomaly_score, ax=None, bins='auto', figsize=None,
filename=None, hist=True, kde=True, threshold=None,
title=None, xlabel='Samples', xlim=None, ylabel='Anomaly score',
ylim=None, **kwargs
):
"""Plot the anomaly score for each sample.
Parameters
----------
anomaly_score : array-like of shape (n_samples,)
Anomaly score for each sample.
ax : matplotlib Axes, default None
Target axes instance.
bins : int, str or array-like, default 'auto'
Number of hist bins.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
hist : bool, default True
If True, plot a histogram of anomaly scores.
kde : bool, default True
If True, plot a gaussian kernel density estimate.
threshold : float, default None
Threshold.
title : string, default None
Axes title. To disable, pass None.
xlabel : string, default 'Samples'
X axis title label. To disable, pass None.
xlim : tuple, default None
Tuple passed to ``ax.xlim``.
ylabel : string, default 'Anomaly score'
Y axis title label. To disable, pass None.
ylim : tuple, default None
Tuple passed to ``ax.ylim``.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.datasets import load_wdbc
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.plotting import plot_anomaly_score
>>> X, _ = load_wdbc(random_state=0, return_X_y=True)
>>> det = MiniBatchKMeans(random_state=0).fit(X)
>>> anomaly_score = det.anomaly_score(X, normalize=True)
>>> plot_anomaly_score(
... anomaly_score, threshold=det.threshold_, linestyle='', marker='.'
... ) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_anomaly_score.png
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def _get_ax_hist(ax):
locator = ax.get_axes_locator()
if locator is None:
# Create an axes on the right side of ax
divider = make_axes_locatable(ax)
ax_hist = divider.append_axes(
'right', '20%', pad=0.1, sharey=ax
)
return ax_hist
for ax_hist in ax.get_figure().get_axes():
locator_hist = ax_hist.get_axes_locator()
if ax_hist is ax:
continue
if locator_hist is None:
continue
if locator_hist._axes_divider is locator._axes_divider:
return ax_hist
anomaly_score = column_or_1d(anomaly_score)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
ax.grid(True, linestyle=':')
if xlim is None:
n_samples, = anomaly_score.shape
xlim = (0., n_samples - 1.)
ax.set_xlim(xlim)
if ylim is None:
ylim = (0., 1.05 * np.max(anomaly_score))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
line, = ax.plot(anomaly_score, **kwargs)
color = line.get_color()
if threshold is not None:
ax.hlines(threshold, xlim[0], xlim[1], color=color)
if hist or kde:
ax_hist = _get_ax_hist(ax)
ax_hist.grid(True, linestyle=':')
ax_hist.tick_params(axis='y', labelleft=False)
ax_hist.set_ylim(ylim)
if hist:
# Draw a histogram
ax_hist.hist(
anomaly_score,
alpha = 0.4,
bins = bins,
color = color,
density = True,
orientation = 'horizontal'
)
if kde:
kernel = gaussian_kde(anomaly_score)
ylocs = np.linspace(ylim[0], ylim[1])
# Draw a gaussian kernel density estimate
ax_hist.plot(kernel(ylocs), ylocs, color=color)
if 'label' in kwargs:
ax.legend(loc='upper left')
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_roc_curve(
y_true, y_score, ax=None, figsize=None,
filename=None, title='ROC curve', xlabel='FPR', ylabel='TPR',
**kwargs
):
"""Plot the Receiver Operating Characteristic (ROC) curve.
Parameters
----------
y_true : array-like of shape (n_samples,)
True Labels.
y_score : array-like of shape (n_samples,)
Target scores.
ax : matplotlib Axes, default None
Target axes instance.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'ROC curve'
Axes title. To disable, pass None.
xlabel : string, default 'FPR'
X axis title label. To disable, pass None.
ylabel : string, default 'TPR'
Y axis title label. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.plot``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.datasets import load_wdbc
>>> from kenchi.outlier_detection import MiniBatchKMeans
>>> from kenchi.plotting import plot_roc_curve
>>> X, y = load_wdbc(random_state=0, return_X_y=True)
>>> det = MiniBatchKMeans(random_state=0).fit(X)
>>> score_samples = det.score_samples(X)
>>> plot_roc_curve(y, score_samples) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_roc_curve.png
"""
import matplotlib.pyplot as plt
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
ax.grid(True, linestyle=':')
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.05)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if 'label' in kwargs:
kwargs['label'] += f' (area={roc_auc:1.3f})'
else:
kwargs['label'] = f'area={roc_auc:1.3f}'
ax.plot(fpr, tpr, **kwargs)
ax.legend(loc='lower right')
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_graphical_model(
G, ax=None, figsize=None, filename=None,
random_state=None, title='GGM', **kwargs
):
"""Plot the Gaussian Graphical Model (GGM).
Parameters
----------
G : networkx Graph
GGM.
ax : matplotlib Axes, default None
Target axes instance.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
random_state : int, RandomState instance, default None
Seed of the pseudo random number generator.
title : string, default 'GGM'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``nx.draw_networkx``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> from kenchi.plotting import plot_graphical_model
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> A = make_sparse_spd_matrix(dim=20, norm_diag=True, random_state=0)
>>> G = nx.from_numpy_matrix(A)
>>> plot_graphical_model(G, random_state=0) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_graphical_model.png
"""
import matplotlib.pyplot as plt
import networkx as nx
if ax is None:
_, ax = plt.subplots(figsize=figsize)
if title is not None:
ax.set_title(title)
node_size = np.array([30. * (d + 1.) for _, d in G.degree])
pos = nx.spring_layout(G, seed=random_state)
width = np.abs([3. * w for _, _, w in G.edges(data='weight')])
# Add the draw_networkx kwargs here
kwargs.setdefault('cmap', 'Spectral')
kwargs.setdefault('node_size', node_size)
kwargs.setdefault('pos', pos)
kwargs.setdefault('width', width)
    # Draw the Gaussian graphical model
nx.draw_networkx(G, ax=ax, **kwargs)
# Turn off tick visibility
ax.tick_params('x', labelbottom=False, bottom=False)
ax.tick_params('y', labelleft=False, left=False)
if filename is not None:
ax.get_figure().savefig(filename)
return ax
def plot_partial_corrcoef(
partial_corrcoef, ax=None, cbar=True, figsize=None,
filename=None, title='Partial correlation', **kwargs
):
"""Plot the partial correlation coefficient matrix.
Parameters
----------
partial_corrcoef : array-like of shape (n_features, n_features)
Partial correlation coefficient matrix.
ax : matplotlib Axes, default None
Target axes instance.
cbar : bool, default True.
If True, draw a colorbar.
figsize : tuple, default None
Tuple denoting figure size of the plot.
filename : str, default None
If provided, save the current figure.
title : string, default 'Partial correlation'
Axes title. To disable, pass None.
**kwargs : dict
Other keywords passed to ``ax.pcolormesh``.
Returns
-------
ax : matplotlib Axes
Axes on which the plot was drawn.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from kenchi.plotting import plot_partial_corrcoef
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> A = make_sparse_spd_matrix(dim=20, norm_diag=True, random_state=0)
>>> plot_partial_corrcoef(A) # doctest: +ELLIPSIS
<matplotlib.axes._subplots.AxesSubplot object at 0x...>
>>> plt.show() # doctest: +SKIP
.. figure:: images/plot_partial_corrcoef.png
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
partial_corrcoef = check_array(partial_corrcoef)
partial_corrcoef = check_symmetric(partial_corrcoef, raise_exception=True)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
if title is not None:
ax.set_title(title)
# Add the pcolormesh kwargs here
kwargs.setdefault('cmap', 'RdBu')
kwargs.setdefault('edgecolors', 'white')
kwargs.setdefault('vmin', -1.)
kwargs.setdefault('vmax', 1.)
# Draw the heatmap
mesh = ax.pcolormesh(
np.ma.masked_equal(partial_corrcoef, 0.), **kwargs
)
ax.set_aspect('equal')
ax.set_facecolor('grey')
# Invert the y axis to show the plot in matrix form
ax.invert_yaxis()
if cbar:
# Create an axes on the right side of ax
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', '5%', pad=0.1)
ax.get_figure().colorbar(mesh, cax=cax)
if filename is not None:
ax.get_figure().savefig(filename)
return ax
| bsd-3-clause |
baliga-lab/kbase-cmonkey | kbcmonkey/kbase.py | 1 | 10004 | import os
import pandas
import WorkspaceClient as wsc
import CmonkeyClient as cmc
import UserAndJobStateClient as ujs
import InferelatorClient as inf
"""
KBase is a distributed platform which provides a REST API to its services.
This is an attempt to provide a Python-friendly API to KBase for use
with cmonkey and Inferelator.
It converts cmonkey standard datatypes to KBase datatypes and abstracts
calls.
"""
WORKSPACE_URL = 'https://kbase.us/services/ws'
CM_URL = 'http://140.221.85.173:7078'
INF_URL = 'http://140.221.67.196:7113'
UJS_URL = 'https://kbase.us/services/userandjobstate'
class UserAndJobState(object):
def __init__(self, ujs_service, job_id):
self.ujs_service = ujs_service
self.job_id = job_id
def get_job_description(self):
return self.ujs_service.get_job_description(self.job_id)
def get_job_status(self):
return self.ujs_service.get_job_status(self.job_id)
def get_detailed_error(self):
return self.ujs_service.get_detailed_error(self.job_id)
class WorkspaceInstance(object):
"""representation of a KBase workspace instance"""
def __init__(self, ws_service, ws_meta):
self.ws_service = ws_service
self.ws_meta = ws_meta
def id(self):
return self.ws_meta[6]
def name(self):
return self.ws_meta[0]
def __repr__(self):
return "{Workspace, name: %s, id: %d}" % (self.name(), self.id())
def save_object(self, objtype, objid, data):
"""Generic way to store an object into KBase, class-specific save functions
call this one"""
return WorkspaceObject(self,
self.ws_service.save_object({'workspace': self.name(),
'type': objtype,
'id': objid,
'data': data})[11])
def get_object(self, object_name):
"""returns the object data for the specified object"""
return self.ws_service.get_object({'workspace': self.name(), 'id': object_name})
class WorkspaceObject(object):
"""an object that is stored in a workspace"""
def __init__(self, ws_inst, id, version=1):
self.ws = ws_inst
self.id = id
self.version = version
self.obj = None
def obj_ref(self):
"""Build an object reference"""
return "%s/%s/%s" % (self.ws.id(), self.id, self.version)
def data(self):
"""retrieves the data from the workspace service"""
if self.obj is None:
            self.obj = self.ws.get_object(self.id)
return self.obj['data']
def __workspaces(ws_service, exclude_global=True):
no_global = 1 if exclude_global else 0
for meta in ws_service.list_workspaces({'excludeGlobal': no_global}):
yield WorkspaceInstance(ws_service, meta)
"""
Gene Expressions
"""
def save_expression_sample(ws, id, condition, gene_pvals, genome_id):
"""
    Saves the p-value for each gene in an expression sample.
    gene_pvals is a dictionary that maps gene names to p-values.
    The condition argument is stored as the sample's source_id."""
data = {'id': id,
'source_id': condition,
'type': 'microarray',
'numerical_interpretation': 'undefined',
'external_source_date': 'unknown',
'expression_levels': gene_pvals,
'genome_id': genome_id}
return ws.save_object('KBaseExpression.ExpressionSample-1.2', id, data)
def save_expression_series(ws, name, source_file,
genome_id, samples):
"""
Gene expressions in KBase are stored as a list of ExpressionSeries
Parameters:
    - ws: WorkspaceInstance in which to store the object
    - name: unique name the object should be stored under
    - source_file: name of the source file
    - genome_id: the unique name of the genome this expression series is based on
    - samples: a list of WorkspaceObject instances wrapping ExpressionSamples
"""
sample_ids = [sample.obj_ref() for sample in samples]
data = {'id': name, 'source_id': source_file,
'external_source_date': 'unknown',
'genome_expression_sample_ids_map': {genome_id: sample_ids}}
return ws.save_object('KBaseExpression.ExpressionSeries-1.0', name, data)
def import_ratios_matrix(ws, name, genome_id, filepath, sep='\t'):
"""Reads a gene expression matrix and stores it in the specified
workspace"""
filename = os.path.basename(filepath)
matrix = pandas.io.parsers.read_table(filepath, index_col=0)
samples = []
for i, colname in enumerate(matrix.columns):
colvals = matrix.values[:, i]
pvals = {rowname: colvals[j] for j, rowname in enumerate(matrix.index)}
samples.append(save_expression_sample(ws, '%s-%d' % (name, i), colname,
pvals, genome_id))
return save_expression_series(ws, name, filename, genome_id, samples)
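# Illustrative usage sketch (not part of the original module): importing a
# tab-separated ratios matrix into a workspace. The credentials, workspace
# name, genome id and file path below are placeholders; workspace() is the
# helper defined later in this module.
#
# >>> ws = workspace('someuser', 'somepassword', 'my_workspace')
# >>> series = import_ratios_matrix(ws, 'halo_ratios', 'my_genome_id',
# ...                               'ratios.tsv')
# >>> series.obj_ref()    # KBase object reference usable by run_cmonkey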
"""
Interaction Sets
"""
def save_interaction_set(ws, name, nwtype, edges, score_name):
"""Save an interaction set, this is for things like STRING networks and operons
Edges are a list of triples (node1, node2, weight)
"""
def dataset_source(id, desc, url):
return {'id': id, 'name': id,
'reference': 'N/A',
'description': desc,
'resource_url': url}
def interaction(id, node1, node2, nwtype, weight):
return {'id': id, 'type': nwtype,
'entity1_id': node1, 'entity2_id': node2,
'scores': {score_name: weight} }
interactions = []
for i, edge in enumerate(edges):
n1, n2, weight = edge
interactions.append(interaction('edge-%d' % i, n1, n2, nwtype, weight))
data = {'id': name, 'name': name,
'description': 'my network',
'type': 'somenetwork',
'source': dataset_source('%s-source' % name, 'some description', ''),
'interactions': interactions}
return ws.save_object('KBaseNetworks.InteractionSet-1.0', name, data)
def import_network(ws, name, nwtype, filepath, sep='\t'):
filename = os.path.basename(filepath)
if nwtype == 'STRING':
score_name = 'STRING_SCORE'
else:
score_name = 'pval'
    with open(filepath) as infile:
edges = []
for line in infile:
n1, n2, w = line.strip().split(sep)
edges.append((n1, n2, float(w)))
return save_interaction_set(ws, name, nwtype, edges, score_name)
def import_string_network(ws, name, filepath, sep='\t'):
return import_network(ws, name, 'STRING', filepath, sep)
"""
Gene Lists
"""
def save_gene_list(ws, id, genes):
"""Saves a gene list"""
data = {'id': id,
'source_id': 'Microbes Online',
'description': 'Transcription factors',
'genes': genes}
return ws.save_object('Inferelator.GeneList-1.0', id, data)
"""
High-level Service Access
"""
def workspaces_for(user, password, service_url=WORKSPACE_URL):
ws_service = wsc.Workspace(service_url, user_id=user, password=password)
return [ws for ws in __workspaces(ws_service)]
def workspace(user, password, name, search_global=False, service_url=WORKSPACE_URL):
ws_service = wsc.Workspace(service_url, user_id=user, password=password)
for ws in __workspaces(ws_service, not search_global):
if ws.name() == name:
return ws
raise Exception("no workspace named '%s' found !" % name)
def user_job_state(user, password, jobid, service_url=UJS_URL):
ujs_service = ujs.UserAndJobState(service_url, user_id=user, password=password)
return UserAndJobState(ujs_service, jobid)
def run_cmonkey(user, password, target_workspace,
series_ref, network_ref,
service_url=CM_URL):
cm_service = cmc.Cmonkey(service_url, user_id=user, password=password)
return cm_service.run_cmonkey(target_workspace,
{'series_ref': series_ref,
'genome_ref': 'AKtest/Halobacterium_sp_NRC-1',
'operome_ref': 'AKtest/Halobacterium_sp_operons',
# 'network_ref': 'AKtest/Halobacterium_sp_STRING',
'network_ref': network_ref,
'networks_scoring': 1,
'motifs_scoring': 1})
class CmonkeyResult(object):
def __init__(self, data):
self.data = data['data']
self.__clusters = None
def num_clusters(self):
return self.data['network']['clusters_number']
def num_rows(self):
return self.data['network']['rows_number']
def num_columns(self):
return self.data['network']['columns_number']
def clusters(self):
if self.__clusters is None:
self.__clusters = []
for i, clusterdata in enumerate(self.data['network']['clusters']):
residual = clusterdata['residual']
columns = clusterdata['sample_ws_ids']
rows = clusterdata['gene_ids']
self.__clusters.append((rows, columns, residual))
return self.__clusters
def __repr__(self):
return "CmonkeyResult - %d rows, %d cols, %d clusters" % (self.num_rows(),
self.num_columns(),
self.num_clusters())
def run_inferelator(user, password, target_workspace,
tf_ref, result_ref,
service_url=INF_URL):
"""abstracts the Inferelator service"""
inf_service = inf.Inferelator(service_url, user_id=user, password=password)
return inf_service.run_inferelator(target_workspace,
{'tf_list_ws_ref': tf_ref,
'cmonkey_run_result_ws_ref': result_ref})
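# Illustrative end-to-end sketch (not part of the original module). All user
# credentials, workspace and object names are placeholders, and the raw
# dictionary returned by run_cmonkey is assumed to be compatible with the
# CmonkeyResult wrapper above (i.e. it contains a 'data' key).
#
# >>> ws = workspace('someuser', 'somepassword', 'my_workspace')
# >>> series = import_ratios_matrix(ws, 'halo_ratios', 'my_genome_id',
# ...                               'ratios.tsv')
# >>> network = import_string_network(ws, 'halo_string', 'string_scores.tsv')
# >>> raw = run_cmonkey('someuser', 'somepassword', 'my_workspace',
# ...                   series.obj_ref(), network.obj_ref())
# >>> result = CmonkeyResult(raw)
# >>> result.num_clusters()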
| lgpl-3.0 |
DTOcean/dtocean-core | dtocean_core/data/definitions.py | 1 | 129810 |
# Copyright (C) 2016 Mathew Topper, David Bould, Rui Duarte, Francesco Ferri
# Copyright (C) 2017-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import __builtin__
import os
from copy import deepcopy
from cycler import cycler
from datetime import datetime
from itertools import product
import yaml
import numpy as np
import pandas as pd
import xarray as xr
from natsort import natsorted
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.mplot3d import Axes3D
from shapely.geometry import Polygon, Point
from descartes import PolygonPatch
from geoalchemy2.shape import to_shape
from aneris.boundary import Structure
from ..utils.database import (get_table_df,
get_one_from_column,
filter_one_from_column,
get_all_from_columns)
BLUE = '#6699cc'
class UnknownData(Structure):
    '''An item of data whose structure is not understood'''
def get_data(self, raw, meta_data):
return raw
def get_value(self, data):
return deepcopy(data)
class SeriesData(Structure):
'''Structure represented in a series of some sort'''
def get_data(self, raw, meta_data):
series = pd.Series(raw)
return series
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
@staticmethod
def auto_file_input(self):
self.check_path()
if ".csv" in self._path:
series = pd.read_csv(self._path,
header=None,
index_col=0,
squeeze=True)
else:
raise TypeError("The specified file format is not supported.",
"Supported format is .csv")
self.data.result = series
return
@staticmethod
def auto_file_output(self):
self.check_path()
s = self.data.result
if ".csv" in self._path:
s.to_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format is .csv")
return
@staticmethod
def get_valid_extensions(cls):
return [".csv"]
class TimeSeries(SeriesData):
'''List of tuples expected with the first entries being datetime.datetime
objects.'''
def get_data(self, raw, meta_data):
if isinstance(raw, pd.Series):
if not isinstance(raw.index, pd.DatetimeIndex):
errStr = ("TimeSeries requires a DatetimeIndex as the index "
"of any given series. Current index type is "
"{}".format(type(raw.index)))
raise ValueError(errStr)
return raw
dates, values = zip(*raw)
if not all(isinstance(x, datetime) for x in dates):
            errStr = ("TimeSeries requires a datetime.datetime object as "
                      "the first index of all given tuples.")
raise ValueError(errStr)
dt_index = pd.DatetimeIndex(dates)
time_series = pd.Series(values, index=dt_index)
return time_series
@staticmethod
def auto_plot(self):
fig = plt.figure()
ax = fig.gca()
self.data.result.plot(ax=ax)
# Pad the y-axis slightly
ymin, ymax = ax.get_ylim()
ylength = ymax - ymin
ymargin = (0.05 * ylength) / ylength
ax.margins(y=ymargin)
if self.meta.result.labels is not None:
ylabel = self.meta.result.labels[0]
else:
ylabel = ""
if self.meta.result.units is not None:
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[0])
plt.ylabel(ylabel.strip())
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
fmtStr = "%Y-%m-%d %H:%M:%S.%f"
SeriesData.auto_file_input(self)
s = self.data.result
try:
s.index = s.index.map(lambda x: pd.to_datetime(x, format=fmtStr))
except ValueError: # wrong datetime object format
errStr = ("TimeSeries requires a datetime.datetime object "
"as first index of all given entries. "
"The accepted format is: {}").format(fmtStr)
raise ValueError(errStr)
self.data.result = s
return
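# Illustrative sketch (not part of the original module): populating a
# TimeSeries structure from raw (datetime, value) tuples. The 'structure' and
# 'meta' objects are assumed to be supplied by the framework in normal use.
#
# >>> from datetime import timedelta
# >>> start = datetime(2000, 1, 1)
# >>> raw = [(start + timedelta(hours=i), float(i)) for i in range(24)]
# >>> series = structure.get_data(raw, meta)
# >>> isinstance(series.index, pd.DatetimeIndex)
# True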
class TimeSeriesColumn(TimeSeries):
"""The first two entries of the tables key of the DDS entry should refer
to the date and time columns within the database. These do not need to be
specified in the labels key, but all other columns should be labelled."""
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:])
if df.empty:
result = None
else:
dt_labels = ["Date", "Time"]
dt_labels.extend(self.meta.result.labels)
name_map = {k: v for k, v in zip(self.meta.result.tables[1:],
dt_labels)}
df = df.rename(columns=name_map)
# Don't allow Date to have any null
if pd.isnull(df["Date"]).any(): return
dtstrs = [datetime.combine(date, time) for
date, time in zip(df["Date"], df["Time"])]
df["DateTime"] = dtstrs
df = df.drop("Date", 1)
df = df.drop("Time", 1)
df = df.set_index("DateTime")
result = df.to_records(convert_datetime64=True)
self.data.result = result
return
class TableData(Structure):
    '''Structure represented in a pandas dataframe. Note that the labels are
    order sensitive, so care should be taken when defining them. Additional
    labels can be passed as a list using the 'add_labels' argument; by default
    these are appended after the meta data labels, but they can be prepended
    by setting the 'add_labels_pos' argument to "front".
'''
def get_data(self, raw,
meta_data,
add_labels=None,
add_labels_pos="back",
relax_cols=False):
if meta_data.labels is None:
errStr = "Labels must be set for TableData column names"
raise ValueError(errStr)
if (meta_data.units is not None and
len(meta_data.units) != len(meta_data.labels)):
errStr = ("Meta data inconsistent. There are {} units defined "
"but {} labels").format(len(meta_data.units),
len(meta_data.labels))
raise ValueError(errStr)
req_cols = meta_data.labels[:]
if add_labels is not None:
if add_labels_pos == "front":
add_labels.extend(req_cols)
req_cols = add_labels
elif add_labels_pos == "back":
req_cols.extend(add_labels)
else:
errStr = ("Argument add_labels_pos may only have value "
"'back' or 'front' not '{}'").format(add_labels_pos)
raise ValueError(errStr)
if isinstance(raw, dict):
raw_cols = raw.keys()
columns = None
elif isinstance(raw, pd.DataFrame):
raw_cols = raw.columns.values
columns = None
else:
raw_cols = req_cols
columns = req_cols
        # Convert req_cols and raw_cols into sets
req_set = set(req_cols)
raw_set = set(raw_cols)
if not relax_cols and raw_set != req_set:
missing = req_set - raw_set
extra = raw_set - req_set
errStr = "Columns in raw data are incorrectly labelled."
if missing:
safe_missing = [str(x) for x in missing]
missing_str = ", ".join(safe_missing)
errStr += " Missing are '{}'.".format(missing_str)
if extra:
safe_extra = [str(x) for x in extra]
extra_str = ", ".join(safe_extra)
errStr += " Erroneous are '{}'.".format(extra_str)
raise ValueError(errStr)
dataframe = pd.DataFrame(raw, columns=columns)
# Order the columns
if relax_cols:
dataframe = dataframe[natsorted(dataframe.columns)]
else:
dataframe = dataframe[req_cols]
return dataframe
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
self.data.result = df
return
@staticmethod
def auto_file_output(self):
self.check_path()
df = self.data.result
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class TableDataColumn(TableData):
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:])
if df.empty:
df = None
else:
name_map = {k: v for k, v in zip(self.meta.result.tables[1:],
self.meta.result.labels)}
df = df.rename(columns=name_map)
# Don't allow all null values
if pd.isnull(df).all().all(): df = None
self.data.result = df
return
class IndexTable(TableData):
'''Structure represented in a pandas dataframe with a defined index.
The first label will identify which dictionary key to use as the index.
    The index column is then considered invariant from the user's perspective.
'''
def get_data(self, raw, meta_data):
if meta_data.labels is None:
errStr = ("The first label of a variable with IndexTable "
"structure must indicate the key to be used as the "
"index.")
raise ValueError(errStr)
index_key = meta_data.labels[0]
dataframe = super(IndexTable, self).get_data(raw, meta_data)
if index_key not in dataframe.columns:
errStr = ("IndexTable structure requires one column "
"to have value '{}'").format(index_key)
raise ValueError(errStr)
if meta_data.valid_values is not None:
index_series = dataframe[index_key]
has_indexes = index_series.isin(meta_data.valid_values)
if not has_indexes.all():
errStr = ("The indices of the given raw data do not match the "
"valid variables meta data")
raise ValueError(errStr)
dataframe = dataframe.set_index(index_key)
if meta_data.valid_values is not None:
dataframe.reindex(index=meta_data.valid_values)
return dataframe
@staticmethod
def auto_file_output(self):
self.data.result = self.data.result.reset_index()
TableData.auto_file_output(self)
return
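# Illustrative sketch (not part of the original module): an IndexTable raw
# dictionary, assuming meta.labels is ["Case", "Cost"] so that "Case" becomes
# the (invariant) index column. 'structure' and 'meta' are framework supplied.
#
# >>> raw = {"Case": ["a", "b", "c"], "Cost": [1., 2., 4.]}
# >>> df = structure.get_data(raw, meta)
# >>> df.index.name
# 'Case'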
class IndexTableColumn(IndexTable):
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:])
if df.empty:
df = None
else:
name_map = {k: v for k, v in zip(self.meta.result.tables[1:],
self.meta.result.labels)}
df = df.rename(columns=name_map)
# Don't allow null values in the keys
if pd.isnull(df[self.meta.result.labels[0]]).any(): df = None
self.data.result = df
return
class LineTable(TableData):
'''Structure represented in a pandas dataframe with free variable data on
the index. The first label will identify which dictionary key to use as
the index.
Each column of the table then represents a line using identical abscissae
values.'''
def get_data(self, raw, meta_data, relax_cols=False):
if meta_data.labels is None:
errStr = ("The first label of a variable with LineTable structure "
"must indicate the key to be used as the index.")
raise ValueError(errStr)
index_key = meta_data.labels[0]
dataframe = super(LineTable, self).get_data(raw,
meta_data,
relax_cols=relax_cols)
if index_key not in dataframe.columns:
errStr = ("LineTable structure requires one column "
"to have value '{}'").format(index_key)
raise ValueError(errStr)
dataframe = dataframe.set_index(index_key)
return dataframe
@staticmethod
def auto_plot(self):
# Get number of columns for legend
ncol = len(self.data.result.columns) / 20 + 1
fig = plt.figure()
ax = fig.gca()
self.data.result.plot(ax=ax)
lgd = ax.legend(bbox_to_anchor=(1.04, 1),
loc="upper left",
ncol=ncol)
xlabel = self.meta.result.labels[0]
ylabel = None
if len(self.meta.result.labels) > 1:
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
if self.meta.result.units[0] is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
if (len(self.meta.result.units) > 1 and
self.meta.result.units[1] is not None):
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
plt.xlabel(xlabel)
if ylabel is not None: plt.ylabel(ylabel)
plt.title(self.meta.result.title)
# Auto adjust canvas for legend
# https://stackoverflow.com/a/45846024
plt.gcf().canvas.draw()
invFigure = plt.gcf().transFigure.inverted()
lgd_pos = lgd.get_window_extent()
lgd_coord = invFigure.transform(lgd_pos)
lgd_xmax = lgd_coord[1, 0]
ax_pos = plt.gca().get_window_extent()
ax_coord = invFigure.transform(ax_pos)
ax_xmax = ax_coord[1, 0]
shift = ax_xmax / lgd_xmax
plt.gcf().tight_layout(rect=(0, 0, shift, 1))
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_output(self):
IndexTable.auto_file_output(self)
return
class LineTableExpand(LineTable):
'''Structure represented in a pandas dataframe with free variable data on
the index. The first label will identify which dictionary key to use as
the index. The input data keys/columns will be included in the final table.
Each column of the table then represents a line using identical abscissae
values.'''
def get_data(self, raw, meta_data):
dataframe = super(LineTableExpand, self).get_data(raw,
meta_data,
relax_cols=True)
return dataframe
class LineTableColumn(LineTable):
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:])
if df.empty:
df = None
else:
name_map = {k: v for k, v in zip(self.meta.result.tables[1:],
self.meta.result.labels)}
df = df.rename(columns=name_map)
# Don't allow null values in the keys
if pd.isnull(df[self.meta.result.labels[0]]).any(): df = None
self.data.result = df
return
class TimeTable(TableData):
'''Structure represented in a pandas dataframe with a datetime index. One
key in the raw data should be named "DateTime". If non-indexed raw data
is given then the datetimes should be in the first column.'''
def get_data(self, raw, meta_data):
dataframe = super(TimeTable, self).get_data(raw,
meta_data,
add_labels=["DateTime"],
add_labels_pos="front")
if "DateTime" not in dataframe.columns:
errStr = ("TimeTable structure requires one column "
"to have value 'DateTime'")
raise ValueError(errStr)
if not all(isinstance(x, datetime) for x in dataframe["DateTime"]):
errStr = ("TimeTable requires a datetime.datetime object "
"as first index of all given entries.")
raise ValueError(errStr)
dataframe = dataframe.set_index(["DateTime"])
return dataframe
@staticmethod
def auto_plot(self):
fig = plt.figure()
self.data.result.plot(ax=fig.gca())
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
fmtStr = "%Y-%m-%d %H:%M:%S.%f"
TableData.auto_file_input(self)
df = self.data.result
try:
df = df.set_index("DateTime")
df.index = df.index.map(
lambda x: pd.to_datetime(x, format=fmtStr))
df.index.name = "DateTime"
df = df.reset_index()
except ValueError: # wrong datetime object format
errStr = ("TimeTable requires a datetime.datetime object "
"as first index of all given entries. "
"The accepted format is: {}").format(fmtStr)
raise ValueError(errStr)
self.data.result = df
return
@staticmethod
def auto_file_output(self):
IndexTable.auto_file_output(self)
return
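# Illustrative sketch (not part of the original module): a TimeTable raw
# dictionary, assuming meta.labels is ["Power"]. The "DateTime" key is always
# required in addition to the labelled columns.
#
# >>> from datetime import timedelta
# >>> start = datetime(2000, 1, 1)
# >>> raw = {"DateTime": [start + timedelta(hours=i) for i in range(3)],
# ...        "Power": [1., 2., 3.]}
# >>> df = structure.get_data(raw, meta)
# >>> df.index.name
# 'DateTime'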
class TimeTableColumn(TimeTable):
"""The first two entries of the tables key of the DDS entry should refer
to the date and time columns within the database. These do not need to be
specified in the labels key. The remaining colums of the tables key should
match to values in the labels key."""
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:])
if df.empty:
df = None
else:
dt_labels = ["Date", "Time"]
dt_labels.extend(self.meta.result.labels)
name_map = {k: v for k, v in zip(self.meta.result.tables[1:],
dt_labels)}
df = df.rename(columns=name_map)
# Don't allow Date to have any null
if pd.isnull(df["Date"]).any(): return
dtstrs = [datetime.combine(date, time) for
date, time in zip(df["Date"], df["Time"])]
df["DateTime"] = dtstrs
df = df.drop("Date", 1)
df = df.drop("Time", 1)
self.data.result = df
return
class TriStateTable(TableData):
'''Structure represented in a pandas dataframe with tri-state values.'''
def get_data(self, raw, meta_data):
df = super(TriStateTable, self).get_data(raw, meta_data)
if not np.all(df.isin(["true", "false", "unknown"])):
errStr = ('Given raw value is incorrectly formatted. It must be '
'a string with value "true", "false" or "unknown".')
raise ValueError(errStr)
return df
class TriStateIndexTable(IndexTable):
'''Structure represented in a pandas dataframe with tri-state values and
a predefined index column.'''
def get_data(self, raw, meta_data):
df = super(TriStateIndexTable, self).get_data(raw, meta_data)
if not np.all(df.isin(["true", "false", "unknown"])):
errStr = ('Given raw value is incorrectly formatted. It must be '
'a string with value "true", "false" or "unknown".')
raise ValueError(errStr)
return df
class NumpyND(Structure):
'''Numpy array. This structure is too general for most applications and so
the get_value method deliberately raises an error. Subclasses of this class
should be used instead.'''
def get_data(self, raw, meta_data):
array = np.array(raw)
return array
def get_value(self, data):
errStr = "Only subclasses of NumpyND may be used."
raise NotImplementedError(errStr)
class Numpy2D(NumpyND):
'''Numpy2D array.'''
def get_data(self, raw, meta_data):
data = super(Numpy2D, self).get_data(raw, meta_data)
if len(data.shape) != 2:
errStr = ("Numpy2D class requires 2 dimensions "
"supplied data has {}").format(
len(data.shape))
raise ValueError(errStr)
return data
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
class Numpy2DColumn(Numpy2D):
'''Numpy2DColumn array.'''
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:4])
if df.empty:
result = None
else:
# Don't allow first two columns to have any null
if pd.isnull(df[self.meta.result.tables[1:3]]).any().any(): return
df = df.set_index(self.meta.result.tables[1:3])
groups = df.groupby(level=df.index.names)
df = groups.first()
levels = map(tuple, df.index.levels)
index = list(product(*levels))
df = df.reindex(index)
shape = map(len, df.index.levels)
result = df.values.reshape(shape)
self.data.result = result
return
class Numpy3D(NumpyND):
'''Numpy3D array.'''
def get_data(self, raw, meta_data):
data = super(Numpy3D, self).get_data(raw, meta_data)
if len(data.shape) != 3:
errStr = ("Numpy3D class requires 3 dimensions "
"supplied data has {}").format(
len(data.shape))
raise ValueError(errStr)
return data
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
class Numpy3DColumn(Numpy3D):
'''Numpy3DColumn array.'''
@staticmethod
def auto_db(self):
schema, table = self.meta.result.tables[0].split(".")
df = get_table_df(self._db,
schema,
table,
self.meta.result.tables[1:5])
if df.empty:
result = None
else:
# Don't allow first three columns to have any null
if pd.isnull(df[self.meta.result.tables[1:4]]).any().any(): return
df = df.set_index(self.meta.result.tables[1:4])
groups = df.groupby(level=df.index.names)
df = groups.first()
levels = map(tuple, df.index.levels)
index = list(product(*levels))
df = df.reindex(index)
shape = map(len, df.index.levels)
result = df.values.reshape(shape)
self.data.result = result
return
class NumpyLine(NumpyND):
    '''2D Numpy array with the second dimension having length 2 (x, y pairs)'''
def get_data(self, raw, meta_data):
data = super(NumpyLine, self).get_data(raw, meta_data)
if data.shape[1] != 2:
errStr = ("Second dimension must have value 2. The second "
"dimension of the given data has value {}").format(
data.shape[1])
raise ValueError(errStr)
# Sort on the zero axis
data = data[np.argsort(data[:, 0])]
return data
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if "x" in df.columns and "y" in df.columns:
data = np.c_[df.x,df.y]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(x, y)")
# Sort on the zero axis
data = data[np.argsort(data[:, 0])]
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
if isinstance(self.data.result, np.ndarray):
data_ = self.data.result
else:
raise TypeError("Data type not understood: possible type for a "
"NumpyND subclass is: numpy.ndarray")
data = {"x": data_[:,0],
"y": data_[:,1]}
df = pd.DataFrame(data)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
plt.figure()
plt.plot(*zip(*self.data.result))
plt.title(self.meta.result.title)
xlabel = ""
ylabel = ""
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if xlabel is None: xlabel = ""
if ylabel is None: ylabel = ""
if self.meta.result.units is not None:
xunit = self.meta.result.units[0]
yunit = self.meta.result.units[1]
if xunit is not None: xlabel = "{} [${}$]".format(xlabel, xunit)
if yunit is not None: ylabel = "{} [${}$]".format(ylabel, yunit)
xlabel = xlabel.lstrip()
ylabel = ylabel.lstrip()
if xlabel: plt.xlabel(xlabel)
if ylabel: plt.ylabel(ylabel)
self.fig_handle = plt.gcf()
return
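# Illustrative sketch (not part of the original module): a NumpyLine structure
# built from raw (x, y) pairs; get_data sorts the points on the x column.
# The 'structure' and 'meta' objects are assumed to be framework supplied.
#
# >>> raw = [(3., 9.), (1., 1.), (2., 4.)]
# >>> line = structure.get_data(raw, meta)
# >>> line.shape
# (3, 2)
# >>> line[:, 0].tolist()
# [1.0, 2.0, 3.0]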
class NumpyLineArray(NumpyLine):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = result[0]
return
class NumpyLineColumn(NumpyLine):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
line = zip(col_lists[0], col_lists[1])
# Filter out None in first column
line = [(x, y) for (x, y) in line if x is not None]
if line: self.data.result = line
return
class NumpyLineDict(NumpyLine):
"""Collection of NumpyLine structures on matching axes."""
def get_data(self, raw, meta_data):
valid_dict = {k: super(NumpyLineDict, self).get_data(v, meta_data) for
k, v in raw.items()}
return valid_dict
def get_value(self, data):
copy_dict = None
if data is not None:
copy_dict = {k: super(NumpyLineDict, self).get_value(v) for
k, v in data.items()}
return copy_dict
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
xl = pd.ExcelFile(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {}, {}".format('.xls',
'.xlsx'))
result = {}
for sheet_name in xl.sheet_names:
df = xl.parse(sheet_name)
if "x" in df.columns and "y" in df.columns:
data = np.c_[df.x, df.y]
else:
                errStr = ("The specified file structure is not supported, "
                          "the columns' headers should be defined as: (x, y)")
raise ValueError(errStr)
# Sort on the zero axis
data = data[np.argsort(data[:, 0])]
result[sheet_name] = data
self.data.result = result
return
@staticmethod
def auto_file_output(self):
self.check_path()
data_dict = self.data.result
if ".xls" in self._path:
xl = pd.ExcelWriter(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are "
"{}, {}".format('.xls', '.xlsx'))
# Sort the keys
keys = data_dict.keys()
keys.sort()
for key in keys:
value = data_dict[key]
data = {"x": value[:, 0],
"y": value[:, 1]}
df = pd.DataFrame(data)
df.to_excel(xl, sheet_name=key, index=False)
xl.save()
return
@staticmethod
def get_valid_extensions(cls):
return [".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
plt.figure()
kwargs = {}
if len(self.data.result) < 10:
kwargs["label"] = self.data.result.keys()
else:
kwargs["color"] = '0.5'
for line in self.data.result:
plt.plot(*zip(*self.data.result[line]), **kwargs)
if len(self.data.result) < 10:
plt.legend(bbox_to_anchor=(1.05, 1),
loc=2,
borderaxespad=0.)
xlabel = ''
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
if self.meta.result.units is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
plt.xlabel(xlabel)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class NumpyLineDictArrayColumn(NumpyLineDict):
"""Collect a column with keys and a second column containing 2D arrays"""
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
all_keys = col_lists[0]
all_lines = col_lists[1]
# Don't allow any None keys
result_dict = {key: line for key, line in zip(all_keys, all_lines)
if key is not None}
if result_dict: self.data.result = result_dict
return
class NumpyBar(NumpyLine):
    '''2D Numpy array with the second dimension having length 2, for binned
    data'''
@staticmethod
def _auto_plot(self):
return
class Histogram(Structure):
"""Structure to store histogram data. The input is a tuple of bin values
and the bins separators. The final structure is a dictionary with keys
"values" and "bins".
"""
def get_data(self, raw, meta_data):
if len(raw[1]) != len(raw[0]) + 1:
errStr = ("The bin separators must contain one more item than the "
"bin values. Given data contains {} values and {} "
"bin separators").format(len(raw[0]),
len(raw[1]))
raise ValueError(errStr)
histogram = {"values": raw[0],
"bins" : raw[1]}
return histogram
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_input(self):
column_requirements = ("bin start", "bin end", "bin value")
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if all([x in df.columns for x in column_requirements]):
data = np.c_[df["bin start"],df["bin end"], df["bin value"]]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(bin start, bin end, bin value)")
# Sort on the zero axis
data = data[np.argsort(data[:, 0])] # is this needed?
#check bin consistency
n_bins = data.shape[0]
for ib in range(1, n_bins):
if not data[ib-1,1] == data[ib,0]:
raise ValueError("The data format is incorrect. ",
"The relation\nbin_end(i) == bin_start(i+1)",
"\nis not satisfied ")
self.data.result = (
data[:,2],
np.unique(data[:,[0,1]].flatten())
)
return
@staticmethod
def auto_file_output(self):
self.check_path()
data_ = self.data.result
data = {"bin start": data_["bins"][:-1],
"bin end": data_["bins"][1:],
"bin value": data_["values"]}
df = pd.DataFrame(data)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
hist = self.data.result
bins = hist['bins']
values = hist['values']
nvalues = len(values)
width = np.ediff1d(bins)
x = bins[:nvalues]
plt.figure()
plt.bar(x, values, width, align = 'edge')
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
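# Illustrative sketch (not part of the original module): a Histogram raw value
# is a (bin values, bin separators) tuple, where the separators list is one
# item longer than the values list. 'structure' and 'meta' are framework
# supplied.
#
# >>> values = [5, 10, 3]
# >>> separators = [0., 1., 2., 3.]
# >>> hist = structure.get_data((values, separators), meta)
# >>> hist["values"], hist["bins"]
# ([5, 10, 3], [0.0, 1.0, 2.0, 3.0])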
class HistogramColumn(Histogram):
"""Assumes the first column contains bin values (Frequency) and next two
columns contain bin separators.
"""
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:4])
all_bin_values = col_lists[0]
all_bin_lowers = col_lists[1]
all_bin_uppers = col_lists[2]
if not all_bin_values or not all_bin_lowers or not all_bin_uppers:
return
# Don't allow None in the bin separators
if None in all_bin_lowers or None in all_bin_uppers: return
lowest_val = min(all_bin_lowers)
bin_separators = [lowest_val] + all_bin_uppers
result = (all_bin_values, bin_separators)
self.data.result = result
return
class HistogramDict(Histogram):
"""Dictionary containing histogram dictionaries for a related quantity.
The raw data should be a dictionary with values as
(bin values, bin separators)
"""
def get_data(self, raw, meta_data):
hist_dict = {k: super(HistogramDict, self).get_data(v, meta_data)
for k, v in raw.items()}
return hist_dict
@staticmethod
def auto_file_input(self):
self.check_path()
column_requirements = ("bin start", "bin end", "bin value")
if ".xls" in self._path:
xl = pd.ExcelFile(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {}, {}".format('.xls',
'.xlsx'))
result = {}
for sheet_name in xl.sheet_names:
df = xl.parse(sheet_name)
if all([x in df.columns for x in column_requirements]):
data = np.c_[df["bin start"], df["bin end"], df["bin value"]]
else:
errStr = ("The specified file structure is not supported "
"the columns' headers should be defined as: "
"(bin start, bin end, bin value)")
raise ValueError(errStr)
# Sort on the zero axis
data = data[np.argsort(data[:, 0])] # is this needed?
# Check bin consistency
n_bins = data.shape[0]
for ib in range(1, n_bins):
if not data[ib - 1, 1] == data[ib, 0]:
errStr = ("The data format is incorrect. ",
"The relation 'bin_end(i) == bin_start(i+1)' ",
"is not satisfied ")
raise ValueError(errStr)
result[sheet_name] = (data[:, 2],
np.unique(data[:, [0, 1]].flatten())
)
self.data.result = result
return
@staticmethod
def auto_file_output(self):
self.check_path()
data_dict = self.data.result
if ".xls" in self._path:
xl = pd.ExcelWriter(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are "
"{}, {}".format('.xls', '.xlsx'))
# Sort the keys
keys = data_dict.keys()
keys.sort()
for key in keys:
value = data_dict[key]
data = {"bin start": value["bins"][:-1],
"bin end": value["bins"][1:],
"bin value": value["values"]}
df = pd.DataFrame(data)
df.to_excel(xl, sheet_name=key, index=False)
xl.save()
return
@staticmethod
def get_valid_extensions(cls):
return [".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_prop_cycle(cycler('color', ['c', 'm', 'y', 'k']))
for i, hist in enumerate(self.data.result):
bins = self.data.result[hist]['bins']
values = self.data.result[hist]['values']
nvalues = len(values)
width = np.ediff1d(bins)
x = bins[:nvalues]
ax.bar(x,
values,
zs=len(self.data.result) - i,
zdir="y",
width=width,
align='edge',
alpha=0.6)
plt.yticks(range(len(self.data.result)),
self.data.result.keys(),
rotation=-15,
va='center',
ha='left')
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class CartesianData(NumpyND):
'''Array with single dimension of length 2 or 3.'''
def get_data(self, raw, meta_data):
data = super(CartesianData, self).get_data(raw, meta_data)
if not (data.shape == (3,) or data.shape == (2,)):
errStr = ("Data must be single dimension vector "
"of length 2 or 3. The shape of the "
"given data is {}").format(data.shape)
raise ValueError(errStr)
return data
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if "x" in df.columns and "y" in df.columns:
if not len(df.x) > 1:
data = np.r_[df.x,df.y]
if "z" in df.columns: data = np.r_[data, df.z]
else:
                raise ValueError("The CartesianData structure only supports",
                                 " x, y and z (optional) columns of length 1")
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(x, y, z (optional))")
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
if isinstance(self.data.result, np.ndarray):
data_ = self.data.result
else:
raise TypeError("Data type not understood: possible type for a "
"CartesianList subclass is: numpy.ndarray")
data = {"x": data_[0],
"y": data_[1]}
if data_.shape[0] == 3:
data["z"] = data_[2]
df = pd.DataFrame(data, index=[0])
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class CartesianDataColumn(CartesianData):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = result[0]
return
class CartesianList(Numpy2D):
'''2D array with second dimension of length 2 or 3.'''
def get_data(self, raw, meta_data):
data = super(CartesianList, self).get_data(raw, meta_data)
if not (data.shape[1] == 3 or data.shape[1] == 2):
errStr = ("Second dimension must be of length 2 or 3. The length "
"for the given data is {}").format(data.shape[1])
raise ValueError(errStr)
return data
def get_value(self, data):
result = None
if data is not None:
result = data.copy()
return result
@staticmethod
def auto_plot(self):
x = []
y = []
for coords in self.data.result:
x.append(coords[0])
y.append(coords[1])
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
ax1.plot(x,y,'k+', mew=2, markersize=10)
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel=''
ylabel=''
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if "x" in df.columns and "y" in df.columns:
data = np.c_[df.x,df.y]
if "z" in df.columns: data = np.c_[data, df.z]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(x, y, z (optional))")
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
if isinstance(self.data.result, np.ndarray):
data_ = self.data.result
else:
raise TypeError("Data type not understood: possible type for a "
"CartesianList subclass is: numpy.ndarray")
data = {"x": data_[:,0],
"y": data_[:,1]}
if data_.shape[1] == 3:
data["z"] = data_[:,2]
df = pd.DataFrame(data)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class CartesianListColumn(CartesianList):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = result[0]
return
class CartesianDict(CartesianData):
'''Dictionary of arrays with single dimension of length 2 or 3.'''
def get_data(self, raw, meta_data):
safe_data = {}
for key, value in raw.iteritems():
safe_value = super(CartesianDict, self).get_data(value, meta_data)
safe_data[key] = safe_value
return safe_data
def get_value(self, data):
new_dict = None
if data is not None:
new_dict = {k: super(CartesianDict, self).get_value(v)
for k, v in data.items()}
return new_dict
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if all([el in df.columns for el in ["x", "y", "ID"]]):
data = np.c_[df.x,df.y]
if "z" in df.columns: data = np.c_[data, df.z]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(ID, x, y, z (optional))")
if len(np.unique(df.ID)) != data.shape[0]:
            raise ValueError("The ID column cannot contain multiple",
                             " instances of the same key.")
data_ = {}
for k, v in zip(df.ID, data):
data_[k] = v
self.data.result = data_
return
@staticmethod
def auto_file_output(self):
self.check_path()
data_ = self.data.result
columns = ["ID", "x", "y"]
if data_.itervalues().next().shape[0] == 3:
columns += ["z"]
df = pd.DataFrame(columns=columns)
for k, v in data_.iteritems():
df2 = pd.DataFrame(v.reshape((1,len(columns)-1)),
columns=columns[1:])
df2["ID"] = k
df = df.append(df2,
ignore_index=True,
sort=False)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
x = []
y = []
n = []
for key, coords in self.data.result.iteritems():
x.append(coords[0])
y.append(coords[1])
n.append(key)
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
ax1.plot(x,y,'k+', mew=2, markersize=10)
for i, txt in enumerate(n):
ax1.annotate(txt, (x[i],y[i]))
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel=''
ylabel=''
''' not working
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
xlabel = "{} {}".format(xlabel, self.meta.result.units[0])
ylabel = "{} {}".format(ylabel, self.meta.result.units[1])
'''
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class CartesianDictColumn(CartesianDict):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
result_dict = {k: v for k, v in zip(col_lists[0], col_lists[1])
if k is not None and v is not None}
if result_dict: self.data.result = result_dict
return
class CartesianListDict(CartesianList):
'''Dictionary of 2D arrays with second dimension of length 2 or 3.'''
def get_data(self, raw, meta_data):
safe_data = {}
for key, value in raw.iteritems():
safe_value = super(CartesianListDict, self).get_data(value,
meta_data)
safe_data[key] = safe_value
return safe_data
def get_value(self, data):
new_dict = None
if data is not None:
new_dict = {k: super(CartesianListDict, self).get_value(v)
for k, v in data.items()}
return new_dict
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if ("x" in df.columns and
"y" in df.columns and
"ID" in df.columns):
dim=2
if "z" in df.columns: dim=3
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
                             "(ID, x, y, z (optional))")
ks = np.unique(df.ID)
data = {}
for k in ks:
t = df[df["ID"]==k]
if dim==2:
data[k] = np.c_[t.x, t.y]
else:
data[k] = np.c_[t.x, t.y, t.z]
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
data_ = self.data.result
columns = ["ID", "x", "y"]
if data_.itervalues().next().shape[1] == 3:
columns += ["z"]
df = pd.DataFrame(columns=columns)
for k, v in data_.iteritems():
df2 = pd.DataFrame(v, columns=columns[1:])
df2["ID"] = [k]*v.shape[0]
df = df.append(df2,
ignore_index=True,
sort=False)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def _auto_plot(self):
return
# @staticmethod
# def auto_plot(self):
#
#        # TODO: INSUFFICIENT IMPLEMENTATION: ONLY PLOTS ONE POINT PER KEY
#
# x = []
# y = []
# n = []
#
# for key, coords in self.data.result.iteritems():
# x.append(coords[0][0])
# y.append(coords[0][1])
# n.append(key)
#
# fig = plt.figure()
# ax1 = fig.add_subplot(1,1,1,aspect='equal')
# ax1.plot(x,y,'k+', mew=2, markersize=10)
# for i, txt in enumerate(n):
# ax1.annotate(txt, (x[i],y[i]))
# ax1.margins(0.1,0.1)
# ax1.autoscale_view()
#
# xlabel=''
# ylabel=''
#
#
# ''' not working
# if self.meta.result.labels is not None:
# xlabel = self.meta.result.labels[0]
# ylabel = self.meta.result.labels[1]
#
# if self.meta.result.units is not None:
# xlabel = "{} {}".format(xlabel, self.meta.result.units[0])
# ylabel = "{} {}".format(ylabel, self.meta.result.units[1])
# '''
#
# plt.xlabel(xlabel)
# plt.ylabel(ylabel)
#
# plt.title(self.meta.result.title)
#
# self.fig_handle = plt.gcf()
#
# return
class CartesianListDictColumn(CartesianListDict):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
result_dict = {k: v for k, v in zip(col_lists[0], col_lists[1])
if k is not None and v is not None}
if result_dict: self.data.result = result_dict
return
class SimpleData(Structure):
'''Simple single value data such as a bool, str, int or float'''
def get_data(self, raw, meta_data):
simple = self._check_types(raw, meta_data.types)
if meta_data.valid_values is not None:
if simple not in meta_data.valid_values:
valid_str = ", ".join(meta_data.valid_values)
errStr = ("Raw data '{}' does not match any valid value from: "
"{}").format(simple, valid_str)
raise ValueError(errStr)
return simple
def get_value(self, data):
return deepcopy(data)
def _check_types(self, raw, type_list):
if type_list is not None:
try:
simple_type = getattr(__builtin__, type_list[0])
simple = simple_type(raw)
except TypeError:
errStr = ("Raw data is of incorrect type. Should be "
"{}, but is {}.").format(type_list,
type(raw))
raise TypeError(errStr)
else:
errStr = "SimpleData structures require types meta data to be set"
raise ValueError(errStr)
return simple
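# Illustrative sketch (not part of the original module): SimpleData coerces
# the raw value using the first entry of meta.types and then checks it against
# meta.valid_values, if set. Here meta.types is assumed to be ["int"] and
# meta.valid_values to be [1, 2, 4]; 'structure' and 'meta' are framework
# supplied.
#
# >>> structure.get_data("2", meta)    # "2" is coerced via int()
# 2
# >>> structure.get_data(3, meta)      # raises ValueError: not a valid value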
class PathData(SimpleData):
"""A SimpleData subclass for retrieving path strings. Should be used as
a super class for file or directory paths"""
def _check_types(self, raw, type_list):
simple = super(PathData, self)._check_types(raw, ["str"])
return simple
class DirectoryData(PathData):
"""A PathData subclass for retrieving path strings to directories."""
class SimpleList(Structure):
'''Simple list of value data such as a bool, str, int or float'''
def get_data(self, raw, meta_data):
raw_list = raw
if meta_data.types is not None:
simple_list = []
for item in raw_list:
try:
simple_type = getattr(__builtin__,
meta_data._types[0])
simple_item = simple_type(item)
except TypeError:
errStr = ("Raw data is of incorrect type. Should be "
"{}, but is {}.").format(meta_data._types[0],
type(item))
raise TypeError(errStr)
simple_list.append(simple_item)
else:
errStr = "SimpleList structures require types meta data to be set"
raise ValueError(errStr)
if meta_data.valid_values is not None:
for simple_item in simple_list:
if simple_item not in meta_data.valid_values:
valid_str = ", ".join(meta_data.valid_values)
errStr = ("Raw data '{}' does not match any valid "
"value from: {}").format(simple_item,
valid_str)
raise ValueError(errStr)
return simple_list
def get_value(self, data):
return data[:]
@staticmethod
def auto_plot(self):
if self.meta.result.types[0] not in ["float", "int"]: return
plt.figure()
plt.plot(self.data.result)
plt.title(self.meta.result.title)
if self.meta.result.units is not None:
plt.ylabel("${}$".format(self.meta.result.units[0]))
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if not "data" in df.columns:
raise TypeError("The file does not contain the correct header.",
"The data column needs to have the header: 'data'")
self.data.result = list(df.data)
return
@staticmethod
def auto_file_output(self):
self.check_path()
data = self.data.result
df = pd.DataFrame(data, columns=["data"])
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
raise TypeError("The specified file format is not supported.",
"Supported format are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class SimpleDict(Structure):
'''Dictionary containing a named variable as a key and a simple
single valued str, float, int, bool as the value.'''
def get_data(self, raw, meta_data):
raw_dict = raw
if meta_data.types is not None:
simple_type = getattr(__builtin__, meta_data._types[0])
typed_dict = deepcopy(raw_dict)
try:
for key, value in raw_dict.iteritems():
simple_item = simple_type(value)
typed_dict[key] = simple_item
except AttributeError:
errStr = ("Raw data may not be a dictionary. Type is actually "
"{}.").format(type(raw_dict))
raise AttributeError(errStr)
except TypeError:
errStr = ("Raw data is of incorrect type. Should be "
"{}, but is {}.").format(meta_data._types,
type(value))
raise TypeError(errStr)
else:
errStr = "SimpleDict structures require types meta data to be set"
raise ValueError(errStr)
# Test keys against valid values
if meta_data.valid_values is not None:
for key in typed_dict.iterkeys():
if key not in meta_data.valid_values:
valid_str = ", ".join(meta_data.valid_values)
errStr = ("Raw data key '{}' does not match any valid "
"value from: {}").format(key,
valid_str)
raise ValueError(errStr)
return typed_dict
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
if not ("data" in df.columns
and "ID" in df.columns):
            raise ValueError("The file does not contain the correct header.",
                             "The data column needs to have the header: 'data'",
                             "and the key column needs to have the header: 'ID'")
self.data.result = dict(zip(df.ID, df.data))
return
@staticmethod
def auto_file_output(self):
self.check_path()
dc = self.data.result
data = [[k,v] for k,v in dc.iteritems()]
df = pd.DataFrame(data, columns=["ID", "data"])
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
if not self.meta.result.types[0] in ["int", "float"]: return
num_dict = self.data.result
labels = num_dict.keys()
labels.sort()
sizes = np.array([num_dict[x] for x in labels])
plt.figure()
plt.bar(range(len(sizes)),
sizes,
align='center')
plt.xticks(range(len(sizes)),
labels,
rotation=30,
ha="right")
if self.meta.result.units is not None:
plt.ylabel("[${}$]".format(self.meta.result.units[0]))
plt.title(self.meta.result.title)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
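# Illustrative sketch (not part of the original module): SimpleDict coerces
# every value of the raw dictionary using the first entry of meta.types, here
# assumed to be ["float"]; 'structure' and 'meta' are framework supplied.
#
# >>> raw = {"device A": "1.5", "device B": 2}
# >>> structure.get_data(raw, meta)
# -> {'device A': 1.5, 'device B': 2.0} (all values coerced to float)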
class SimplePie(SimpleDict):
@staticmethod
def auto_plot(self):
if not self.meta.result.types[0] in ["int", "float"]: return
num_dict = self.data.result
labels = num_dict.keys()
sizes = np.array(num_dict.values())
# Don't allow negative values
if (sizes < 0).any(): return
plt.figure()
_, _, autotexts = plt.pie(sizes,
labels=labels,
autopct='%1.1f%%',
shadow=True,
startangle=90)
for autotext in autotexts:
autotext.set_color('white')
autotext.set_path_effects([PathEffects.withStroke(linewidth=2,
foreground='k')])
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
plt.title(self.meta.result.title, y=1.08)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
class SimpleDataColumn(SimpleData):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = result[0]
return
class SimpleDataForeignColumn(SimpleData):
"""Table meta data keys are as follows:
1. The primary table with the foreign key column
2. The secondary table to retrieve the data from
3. The column to extract the value of the foreign key in the primary table
4. The column to filter the key value against in the secondary table
5. The column to retrieve as the result in the secondary table.
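
    Example:
        A hypothetical "tables" metadata list (schema, table and column
        names are illustrative only) such as

            ["project.device", "reference.device_type",
             "type_id", "id", "description"]

        would read the foreign key from project.device.type_id, filter
        reference.device_type.id against that value and return
        reference.device_type.description as the result.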
"""
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
table_two_id = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[2])
if table_two_id is None or table_two_id[0] is None: return
schema, table = self.meta.result.tables[1].split(".")
result = filter_one_from_column(self._db,
schema,
table,
self.meta.result.tables[4],
self.meta.result.tables[3],
table_two_id[0])
if result is not None and result[0] is not None:
self.data.result = result[0]
return
class DirectoryDataColumn(DirectoryData):
@staticmethod
def auto_db(self):
SimpleDataColumn.auto_db(self)
return
class SimpleListColumn(SimpleList):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
[self.meta.result.tables[1]])
result = col_lists[0]
        if result and set(result) != set([None]):
self.data.result = result
return
@staticmethod
def _auto_file_input(self):
return
@staticmethod
def _auto_file_output(self):
return
class SimpleDictColumn(SimpleDict):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
result = {k: v for k, v in zip(col_lists[0], col_lists[1])
if k is not None and v is not None}
if result: self.data.result = result
return
class DateTimeData(Structure):
    '''A datetime.datetime data object'''
def get_data(self, raw, meta_data):
if not isinstance(raw, datetime):
errStr = ("DateTimeData requires a datetime.datetime object as "
"raw data.")
raise TypeError(errStr)
return raw
def get_value(self, data):
return data
class DateTimeDict(DateTimeData):
    '''Dictionary containing a named variable as a key and a datetime as the
value.'''
def get_data(self, raw, meta_data):
raw_dict = raw
checked_dict = {}
try:
for key, value in raw_dict.iteritems():
date_item = super(DateTimeDict, self).get_data(value,
meta_data)
checked_dict[key] = date_item
except AttributeError:
errStr = ("Raw data may not be a dictionary. Type is actually "
"{}.").format(type(raw_dict))
raise AttributeError(errStr)
except TypeError:
errStr = ("Raw data is of incorrect type. Should be "
"datetime.datetime, but is {}.").format(type(value))
raise TypeError(errStr)
return checked_dict
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
if not ("data" in df.columns
and "ID" in df.columns):
            raise ValueError("The file does not contain the correct ",
                             "header. The data column needs to have the ",
                             "header: 'data' and the key column needs to have "
                             "the header: 'ID'")
result = {}
for key, data in zip(df.ID, df.data):
ts = pd.to_datetime(data)
dt = ts.to_pydatetime()
result[key] = dt
if not result: result = None
self.data.result = result
return
@staticmethod
def auto_file_output(self):
self.check_path()
dc = self.data.result
data = [[k,v] for k,v in dc.iteritems()]
df = pd.DataFrame(data, columns=["ID", "data"])
if ".xls" in self._path:
writer = pd.ExcelWriter(self._path, engine='xlsxwriter')
df.to_excel(writer, index=False)
writer.save()
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class TriStateData(Structure):
'''Data that can be "true", "false" or "unknown". Must be provided as
a string'''
def get_data(self, raw, meta_data):
if isinstance(raw, basestring):
if raw in ["true", "false", "unknown"]:
return raw
errStr = ('Given raw value is incorrectly formatted. It must be '
'a string with value "true", "false" or "unknown". '
'Given was: {}').format(raw)
raise ValueError(errStr)
def get_value(self, data):
return deepcopy(data)
class PointData(Structure):
'''A shapely Point variable. These are expected to be georeferenced.'''
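    # Minimal sketch (assumed coordinate values): raw data may be either a
    # shapely Point or a 2/3-element sequence, e.g.
    #
    #     raw = (491000.0, 6502000.0)   # hypothetical UTM easting, northing
    #     point = Point(*[float(x) for x in raw])
    #
    # which mirrors the coercion performed by get_data below.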
def get_data(self, raw, meta_data):
if isinstance (raw, Point):
point = raw
else:
# Don't allow misshapen data
if not 1 < len(raw) < 4:
errStr = ("Raw data must contain 2 or 3 coordinates. "
"Given data has {}").format(len(raw))
raise ValueError(errStr)
point = Point(*[float(x) for x in raw])
return point
def get_value(self, data):
result = None
if data is not None:
result = Point(data)
return result
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
            raise TypeError("The specified file format is not supported. ",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if "x" in df.columns and "y" in df.columns:
data = np.c_[df.x,df.y]
if "z" in df.columns: data = np.c_[data, df.z]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
"x, y, z(optional))")
result = None
if len(data) == 1:
result = Point(data[0])
else:
result = [Point(coord) for coord in data]
self.data.result = result
return
@staticmethod
def auto_file_output(self):
self.check_path()
if isinstance(self.data.result, list):
data_ = np.array([np.array(el) for el in self.data.result])
elif isinstance(self.data.result, dict):
data_ = np.array([np.array(el) for k, el in
self.data.result.items()])
elif isinstance(self.data.result, Point):
data_ = np.array(self.data.result).reshape((1,-1))
else:
raise TypeError("Data type not understood: possible type for a "
"PointData subclass are: Point, list, dictionary")
data = {"x": data_[:,0],
"y": data_[:,1]}
if data_.shape[1] == 3:
data["z"] = data_[:,2]
df = pd.DataFrame(data)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
            raise TypeError("The specified file format is not supported.",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
x = self.data.result.x
y = self.data.result.y
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
ax1.plot(x,y,'k+', mew=2, markersize=10)
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel=''
ylabel=''
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class PointList(PointData):
'''A list containing shapely Point variables as values'''
def get_data(self, raw, meta_data):
point_list = [super(PointList, self).get_data(xy, meta_data)
for xy in raw]
return point_list
def get_value(self, data):
new_point_list = None
if data is not None:
new_point_list = [
super(PointList, self).get_value(p) for p in data]
return new_point_list
@staticmethod
def auto_plot(self):
x = []
y = []
for coords in self.data.result:
x.append(coords.x)
y.append(coords.y)
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
ax1.plot(x,y,'k+', mew=2, markersize=10)
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel=''
ylabel=''
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class PointDict(PointData):
'''A dictionary containing shapely Point variables as values'''
def get_data(self, raw, meta_data):
points_dict = {k: super(PointDict, self).get_data(v, meta_data)
for k, v in raw.items()}
return points_dict
def get_value(self, data):
new_points_dict = None
if data is not None:
new_points_dict = {k: super(PointDict, self).get_value(v)
for k, v in data.items()}
return new_points_dict
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
            raise TypeError("The specified file format is not supported. ",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if all([el in df.columns for el in ["x", "y", "ID"]]):
data = np.c_[df.x,df.y]
if "z" in df.columns: data = np.c_[data, df.z]
else:
            raise ValueError("The specified file structure is not supported, "
                             "the columns' headers should be defined as: "
"ID, x, y, z(optional))")
self.data.result = dict(zip(df.ID, [Point(xyz) for xyz in data]))
return
@staticmethod
def auto_file_output(self):
self.check_path()
if isinstance(self.data.result, dict):
data_ = np.array([[k]+list(np.array(el)) for k, el in
self.data.result.items()])
else:
raise TypeError("Data type not understood: possible type for a "
"PointData subclass are: Point, list, dictionary")
data = {"x": data_[:,1],
"y": data_[:,2],
"ID": data_[:,0]}
if data_.shape[1]-1 == 3:
data["z"] = data_[:,3]
df = pd.DataFrame(data)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
            raise TypeError("The specified file format is not supported.",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def auto_plot(self):
x = []
y = []
for coords in self.data.result.itervalues():
x.append(coords.x)
y.append(coords.y)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
ax1.plot(x, y, 'k+', mew=2, markersize=10)
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
for key, point in self.data.result.iteritems():
coords = list(point.coords)[0]
ax1.annotate(str(key),
xy=coords[:2],
xytext=(0, 10),
xycoords='data',
textcoords='offset pixels',
horizontalalignment='center',
weight="bold",
size='large')
xlabel = ''
ylabel = ''
if self.meta.result.labels is not None:
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.title(self.meta.result.title)
'''
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units[0] is not None:
xlabel = "{} {}".format(xlabel, self.meta.result.units[0])
plt.xlabel(xlabel)
if self.meta.result.units[1] is not None:
xlabel = "{} {}".format(ylabel, self.meta.result.units[1])
plt.ylabel(ylabel)
'''
self.fig_handle = plt.gcf()
return
class PointDataColumn(PointData):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = to_shape(result[0])
return
class PointDictColumn(PointDict):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
filter_dict = {k: v for k, v in zip(col_lists[0], col_lists[1])
if k is not None and v is not None}
point_dict = {key: to_shape(wkb_point)
for key, wkb_point in filter_dict.items()}
if point_dict: self.data.result = point_dict
return
class PolygonData(Structure):
def get_data(self, raw, meta_data):
if isinstance (raw, Polygon):
ring = raw
else:
np_raw = np.array(raw)
if len(np_raw.shape) != 2:
                errStr = ("Raw data must have exactly 2 dimensions. Given "
                          "data has {}").format(len(np_raw.shape))
raise ValueError(errStr)
# Don't allow misshapen data
if not 1 < np_raw.shape[1] < 4:
                errStr = ("Raw data must contain 2 or 3 dimensional "
                          "coordinates. Given data has {}").format(np_raw.shape[1])
raise ValueError(errStr)
ring = Polygon(np_raw)
return ring
def get_value(self, data):
result = None
if data is not None:
result = Polygon(data)
return result
@staticmethod
def auto_plot(self):
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
patch = PolygonPatch(self.data.result,
fc=BLUE,
ec=BLUE,
fill=False,
linewidth=2)
ax1.add_patch(patch)
coords = list(self.data.result.exterior.coords)
for i, xy in enumerate(coords[:-1]):
ax1.annotate(str(xy[:2]),
xy=xy[:2],
horizontalalignment='center',
weight="bold",
size='large')
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title(self.meta.result.title)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
else:
            raise TypeError("The specified file format is not supported. ",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
if len(df) < 3:
raise ValueError("PolygonError: A LinearRing must have ",
"at least 3 coordinate tuples")
if "x" in df.columns and "y" in df.columns and "z" in df.columns:
data = Polygon(np.c_[df.x, df.y, df.z])
elif "x" in df.columns and "y" in df.columns:
data = Polygon(np.c_[df.x, df.y])
else:
raise ValueError("The specified file structure is not supported, "
"the columns' headers should be defined as: "
"x, y, z(optional)")
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
poly = self.data.result
data = []
if isinstance(poly, Polygon):
data = np.array(poly.exterior.coords[:])[:-1]
else:
raise TypeError("The result does not contain valid",
" Polygon object.")
if data.shape[1] == 2:
columns = ["x", "y"]
elif data.shape[1] == 3:
columns = ["x", "y", "z"]
else:
errStr = "Look, I'm a doctor, not an escalator."
raise SystemError(errStr)
df = pd.DataFrame(data, columns=columns)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
else:
            raise TypeError("The specified file format is not supported.",
                            "Supported formats are {},{},{}".format('.csv',
'.xls',
'.xlsx'))
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
class PolygonDataColumn(PolygonData):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
result = get_one_from_column(self._db,
schema,
table,
self.meta.result.tables[1])
if result is not None and result[0] is not None:
self.data.result = to_shape(result[0])
return
class PolygonList(PolygonData):
def get_data(self, raw, meta_data):
ring_list = [super(PolygonList, self).get_data(x, meta_data)
for x in raw]
return ring_list
def get_value(self, data):
ring_list = None
if data is not None:
ring_list = [super(PolygonList, self).get_value(x) for x in data]
return ring_list
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
if ("ID" in df.columns and
"x" in df.columns and
"y" in df.columns and
"z" in df.columns):
ks = np.unique(df.ID)
data = []
for k in ks:
t = df[df["ID"]==k]
data.append(Polygon(np.c_[t.x, t.y, t.z]))
elif "ID" in df.columns and "x" in df.columns and "y" in df.columns:
ks = np.unique(df.ID)
data = []
for k in ks:
t = df[df["ID"]==k]
data.append(Polygon(np.c_[t.x, t.y]))
else:
raise ValueError("The specified file structure is not supported, "
"the columns' headers should be defined as: "
"ID, x, y, z(optional)")
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
polys = self.data.result
data = []
for ip, poly in enumerate(polys):
if isinstance(poly, Polygon):
data.append(("polygon-{}".format(ip),
np.array(poly.exterior.coords[:])[:-1]))
else:
raise TypeError("The result list does not contain valid",
" Polygon objects.")
if data[0][1].shape[1] == 2:
columns = ["ID", "x", "y"]
elif data[0][1].shape[1] == 3:
columns = ["ID", "x", "y", "z"]
else:
errStr = "I'm a doctor, not a bricklayer."
raise SystemError(errStr)
df = pd.DataFrame(columns=columns)
for k, v in data:
df2 = pd.DataFrame(v, columns=columns[1:])
df2["ID"] = [k]*v.shape[0]
df = df.append(df2,
ignore_index=True,
sort=False)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
for polygon in self.data.result:
patch = PolygonPatch(polygon,
fc=BLUE,
ec=BLUE,
fill=False,
linewidth=2)
ax1.add_patch(patch)
ax1.margins(0.1,0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title(self.meta.result.title)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
class PolygonListColumn(PolygonList):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
[self.meta.result.tables[1]])
all_entries = col_lists[0]
if all_entries and set(all_entries) != set([None]):
self.data.result = [to_shape(wkb_poly)
for wkb_poly in all_entries]
return
class PolygonDict(PolygonData):
def get_data(self, raw, meta_data):
ring_dict = {k: super(PolygonDict, self).get_data(v, meta_data)
for k, v in raw.items()}
return ring_dict
def get_value(self, data):
ring_dict = None
if data is not None:
ring_dict = {k: super(PolygonDict, self).get_value(v)
for k, v in data.items()}
return ring_dict
@staticmethod
def auto_file_input(self):
self.check_path()
if ".xls" in self._path:
df = pd.read_excel(self._path)
elif ".csv" in self._path:
df = pd.read_csv(self._path)
data = {}
if ("ID" in df.columns and
"x" in df.columns and
"y" in df.columns and
"z" in df.columns):
ks = np.unique(df.ID)
for k in ks:
t = df[df["ID"] == k]
data[k] = Polygon(np.c_[t.x, t.y, t.z])
elif "ID" in df.columns and "x" in df.columns and "y" in df.columns:
ks = np.unique(df.ID)
for k in ks:
t = df[df["ID"] == k]
data[k] = Polygon(np.c_[t.x, t.y])
else:
raise ValueError("The specified file structure is not supported, "
"the columns' headers should be defined as: "
"ID, x, y, z(optional)")
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
polys = self.data.result
data = []
for name, poly in polys.iteritems():
if isinstance(poly, Polygon):
data.append((name, np.array(poly.exterior.coords[:])[:-1]))
else:
raise TypeError("The result list does not contain valid",
" Polygon objects.")
if data[0][1].shape[1] == 2:
columns = ["ID", "x", "y"]
elif data[0][1].shape[1] == 3:
columns = ["ID", "x", "y", "z"]
else:
errStr = "I'm a doctor, not a coal miner."
raise SystemError(errStr)
df = pd.DataFrame(columns=columns)
for k, v in data:
df2 = pd.DataFrame(v, columns=columns[1:])
df2["ID"] = [k]*v.shape[0]
df = df.append(df2,
ignore_index=True,
sort=False)
if ".xls" in self._path:
df.to_excel(self._path, index=False)
elif ".csv" in self._path:
df.to_csv(self._path, index=False)
return
@staticmethod
def get_valid_extensions(cls):
return [".csv", ".xls", ".xlsx"]
@staticmethod
def auto_plot(self):
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
for key, polygon in self.data.result.iteritems():
patch = PolygonPatch(polygon,
fc=BLUE,
ec=BLUE,
fill=False,
linewidth=2)
ax1.add_patch(patch)
centroid = np.array(polygon.centroid)
ax1.annotate(str(key),
xy=centroid[:2],
xytext=(0, 0),
xycoords='data',
textcoords='offset pixels',
horizontalalignment='center',
weight="bold",
size='large')
ax1.margins(0.1, 0.1)
ax1.autoscale_view()
xlabel = 'UTM x [$m$]'
ylabel = 'UTM y [$m$]'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title(self.meta.result.title)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
class PolygonDictColumn(PolygonDict):
@staticmethod
def auto_db(self):
if self.meta.result.tables is None:
errStr = ("Tables not defined for variable "
"'{}'.").format(self.meta.result.identifier)
raise ValueError(errStr)
schema, table = self.meta.result.tables[0].split(".")
col_lists = get_all_from_columns(self._db,
schema,
table,
self.meta.result.tables[1:3])
filter_dict = {k: v for k, v in zip(col_lists[0], col_lists[1])
if k is not None and v is not None}
poly_dict = {key: to_shape(wkb_poly)
for key, wkb_poly in filter_dict.items()}
if poly_dict: self.data.result = poly_dict
return
class XGridND(Structure):
    '''xarray DataArray object. See xarray.pydata.org
Note: This class should not be used directly, subclass and set get_n_dims
to an integer value.'''
def get_n_dims(self):
return None
def get_data(self, raw, meta_data):
"""
Add raw data.
Args:
data (dict): dictionary with following keys:
values (numpy.ndarray): The data to store.
coords (list): List of arrays or lists with the coordinates for
each dimension. They are ordered by the dimensions of the
array.
Note:
The "labels" key in the DDS files is used to provide dimension
and data dimension names. The number of labels should match
the number of dimensions in the data
The "units" key in the DDS files is used to add units attributes
to the dimensions and the data. The first n entries matches
            the dimensions and the last matches the data.
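
        Example:
            An illustrative raw value (hypothetical numbers) for a
            two-dimensional subclass with labels ["x", "y"] could be

                raw = {"values": np.array([[1.0, 2.0], [3.0, 4.0]]),
                       "coords": [[0.0, 10.0], [0.0, 10.0]]}

            i.e. one coordinate list per dimension, ordered to match the
            shape of "values".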
"""
coords = raw["coords"]
n_dims = self.get_n_dims()
if meta_data.labels is None:
errStr = ("Labels metadata must be set for {} data "
"structures").format(self.__class__.__name__)
raise ValueError(errStr)
dims = meta_data.labels[:]
if len(dims) != n_dims:
            errStr = ("Given number of labels is incorrect. The data has {} "
                      "dimensions but {} labels are given").format(n_dims,
                                                                   len(dims))
raise ValueError(errStr)
if meta_data.units is not None:
units = meta_data.units[:]
else:
units = None
coords, attrs = self._get_coords_attrs(dims,
coords,
units)
data_array = xr.DataArray(raw["values"],
coords=coords,
attrs=attrs)
return data_array
def _get_coords_attrs(self, dims, coords, units):
if len(dims) != len(coords):
errStr = ("The number of coordinate lists must match the number "
"of labels.")
raise ValueError(errStr)
if units is not None and len(units) != len(dims) + 1:
errStr = ("The number of units must match the number "
"of labels plus one.")
raise ValueError(errStr)
attrs = None
coord_tuples = []
for dim, coord_list in zip(dims, coords):
            coord_tuples.append((dim, coord_list))
if units is not None:
data_unit = units.pop()
if data_unit is not None:
attrs = {'units': data_unit}
new_tuples = []
for coord_item, unit in zip(coord_tuples, units):
if unit is not None:
coord_attrs = {'units': unit}
new_coord_item = (coord_item[0],
coord_item[1],
coord_attrs)
else:
new_coord_item = coord_item
new_tuples.append(new_coord_item)
coord_tuples = new_tuples
return coord_tuples, attrs
def get_value(self, data):
result = None
if data is not None:
result = data.copy(deep=True)
return result
@staticmethod
def auto_file_input(self):
self.check_path(True)
dataset = xr.open_dataset(self._path)
coord_list = []
for coord in self.meta.result.labels:
coord_list.append(dataset.coords[coord])
raw_dict = {"values": dataset["data"].values,
"coords": coord_list}
self.data.result = raw_dict
return
@staticmethod
def auto_file_output(self):
self.check_path()
data = self.data.result
data = data.to_dataset(name="data")
data.to_netcdf(self._path, format="NETCDF4")
return
@staticmethod
def get_valid_extensions(cls):
return [".nc"]
class XGrid2D(XGridND):
    '''xarray DataArray object with 2 dimensions and arbitrary number of
values. See xarray.pydata.org'''
def get_n_dims(self):
return 2
@staticmethod
def auto_plot(self):
xcoord = self.data.result.coords[self.meta.result.labels[0]]
ycoord = self.data.result.coords[self.meta.result.labels[1]]
if xcoord.values.dtype.kind in {'U', 'S'}:
xuniques = xcoord.values
x = range(len(xuniques))
else:
xuniques, x = np.unique(xcoord, return_inverse=True)
xuniques = ['{0:.8g}'.format(tick) for tick in xuniques]
if ycoord.values.dtype.kind in {'U', 'S'}:
yuniques = ycoord.values
y = range(len(yuniques))
else:
yuniques, y = np.unique(ycoord, return_inverse=True)
yuniques = ['{0:.8g}'.format(tick) for tick in yuniques]
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1, aspect='equal')
plt.contourf(x, y, self.data.result.T)
clb = plt.colorbar()
xlabel = self.meta.result.labels[0]
ylabel = self.meta.result.labels[1]
if self.meta.result.units is not None:
if self.meta.result.units[0] is not None:
xlabel = "{} [${}$]".format(xlabel, self.meta.result.units[0])
if self.meta.result.units[1] is not None:
ylabel = "{} [${}$]".format(ylabel, self.meta.result.units[1])
if self.meta.result.units[2] is not None:
clb.set_label("${}$".format(self.meta.result.units[2]))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
ax1.set_xticklabels(xuniques)
ax1.set_yticklabels(yuniques)
plt.title(self.meta.result.title)
self.fig_handle = plt.gcf()
return
class XGrid3D(XGridND):
    '''xarray DataArray object with 3 dimensions and arbitrary number of
values. See xarray.pydata.org'''
def get_n_dims(self):
return 3
class XSetND(XGridND):
    '''xarray Dataset object with n_dims dimensions and arbitrary number of
values. See xarray.pydata.org
Note: This class should not be used directly, subclass and set get_n_dims
to an integer value.'''
def get_data(self, raw, meta_data):
"""
Add raw data.
Args:
data (dict): dictionary with following keys:
values (dict): keys: dataset name
values (numpy.ndarray): The data to store.
coords (list): List of arrays or lists with the coordinates for
each dimension. They are ordered by the dimensions of the
array.
Note:
The "labels" key in the DDS files is used to provide dimension
and data dimension names. The first n labels will be used
            to define the dimension names, and the remaining labels should
            match the name of each data item stored in the set.
The "units" key in the DDS files is used to add units attributes
to the dimensions and the data. The first n entries matches
            the dimensions and the last matches the data.
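
        Example:
            An illustrative raw value (hypothetical names and arrays) for a
            two-dimensional subclass with labels
            ["x", "y", "depth", "sediment"] could be

                raw = {"values": {"depth": depth_array,
                                  "sediment": sediment_array},
                       "coords": [x_coords, y_coords]}

            where the first two labels name the dimensions and the remaining
            labels name the arrays stored in the set.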
"""
n_dims = self.get_n_dims()
if meta_data.labels is None:
errStr = ("Labels metadata must be set for {} data "
"structures").format(self.__class__.__name__)
raise ValueError(errStr)
if len(meta_data.labels) < n_dims:
            errStr = "Insufficient entries in labels metadata to set dimensions"
raise ValueError(errStr)
dims = meta_data.labels[:n_dims]
set_names = meta_data.labels[n_dims:]
if not set_names:
errStr = "No data labels are set in labels metadata."
raise ValueError(errStr)
missing_values = set(set_names) - set(raw["values"])
if missing_values:
errStr = "Data labels '{}' are missing from raw data".format(
", ".join(missing_values))
raise ValueError(errStr)
if meta_data.units is not None:
all_units = meta_data.units[:]
coord_units = meta_data.units[:n_dims]
else:
all_units = None
coord_units = None
set_dict = {}
for k in raw["values"]:
if k not in set_names:
                errStr = ("Data label '{}' is not valid for this structure "
                          "definition. Must be one of: "
"{}").format(k, ", ".join(set_names))
raise ValueError(errStr)
if all_units is not None:
unit_idx = set_names.index(k)
local_units = coord_units[:]
local_units.append(all_units[unit_idx + n_dims])
else:
local_units = None
coords, attrs = self._get_coords_attrs(dims,
raw["coords"],
local_units)
data_array = xr.DataArray(raw["values"][k],
coords=coords,
attrs=attrs)
set_dict[k] = data_array
data_set = xr.Dataset(set_dict)
return data_set
@staticmethod
def auto_file_input(self):
self.check_path(True)
dataset = xr.open_dataset(self._path)
values_dict = {}
for key, dataarray in dataset.data_vars.iteritems():
values_dict[key] = dataarray.values
n_vars = len(values_dict)
coord_list = []
for coord in self.meta.result.labels[:-n_vars]:
coord_list.append(dataset.coords[coord])
raw_dict = {"values": values_dict,
"coords": coord_list}
self.data.result = raw_dict
return
@staticmethod
def auto_file_output(self):
self.check_path()
data = self.data.result
data.to_netcdf(self._path, format="NETCDF4")
return
@staticmethod
def get_valid_extensions(cls):
return [".nc"]
class XSet2D(XSetND):
    '''xarray Dataset object with 2 dimensions and arbitrary number of
values. See xarray.pydata.org'''
def get_n_dims(self):
return 2
class XSet3D(XSetND):
    '''xarray Dataset object with 3 dimensions and arbitrary number of
values. See xarray.pydata.org'''
def get_n_dims(self):
return 3
class Strata(XSet3D):
    '''xarray Dataset object with 3 dimensions and arbitrary number of
values. This is a bespoke class for sediment layer retrieval.'''
@staticmethod
def auto_plot(self):
bathy = self.data.result["depth"].sel(layer="layer 1")
x = bathy.coords[self.meta.result.labels[0]]
y = bathy.coords[self.meta.result.labels[1]]
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1,aspect='equal')
plt.contourf(x, y, bathy.T)
clb = plt.colorbar()
xlabel = "UTM x [$m$]"
ylabel = "UTM y [$m$]"
zlabel = "Depth [$m$]"
plt.xlabel(xlabel)
plt.ylabel(ylabel)
clb.set_label(zlabel)
plt.ticklabel_format(useOffset=False)
plt.xticks(rotation=30, ha='right')
plt.title(self.meta.result.title)
plt.tight_layout()
self.fig_handle = plt.gcf()
return
@staticmethod
def auto_file_input(self):
self.check_path(True)
strata = xr.open_dataset(self._path)
sediment_path = self._path.replace("depth", "sediment")
sediment_data = xr.open_dataset(sediment_path)
strata["sediment"] = sediment_data["sediment"]
values_dict = {"depth": strata["depth"].values,
"sediment": strata["sediment"].values}
coord_list = [strata.coords[self.meta.result.labels[0]],
strata.coords[self.meta.result.labels[1]],
strata.coords["layer"]]
raw_dict = {"values": values_dict,
"coords": coord_list}
self.data.result = raw_dict
return
@staticmethod
def auto_file_output(self):
self.check_path()
root_path = os.path.splitext(self._path)[0]
depth_data = self.data.result["depth"]
depth_set = depth_data.to_dataset()
data_path = "{}_depth.nc".format(root_path)
depth_set.to_netcdf(data_path, format="NETCDF4")
sediment_data = self.data.result["sediment"]
sediment_data = sediment_data.astype(str)
sediment_set = sediment_data.to_dataset()
data_path = "{}_sediment.nc".format(root_path)
sediment_set.to_netcdf(data_path, format="NETCDF4")
return
class Network(Structure):
'''Structure which describes the networked elements of the electrical and
moorings / foundations outputs
Note:
        At the highest level a dictionary is expected with two keys: topology
        and nodes. The topology key contains the connectivity of the network
        and the nodes key contains labels for the nodes, in particular the
        quantity of components used at each node and a unique marker that can
        be used to associate external data to the node labels.'''
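    # Minimal sketch of the expected raw structure (hypothetical markers and
    # component names):
    #
    #     raw = {"topology": {"array": {"Substation": ["device001"]}},
    #            "nodes": {"device001": {"marker": [0],
    #                                    "quantity": {"cable": 1}}}}
    #
    # get_data only verifies that the two top level keys are present.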
def get_data(self, raw, meta_data):
if set(raw.keys()) != set(["topology", "nodes"]):
errStr = ("The two top level keys 'topology' and 'nodes' must "
"be set for a valid network.")
raise KeyError(errStr)
# Need to test more of the network details here. Keys could be checked
# along with counting the number of items vs the number of markers.
return raw
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_input(self):
self.check_path(True)
with open(self._path, 'r') as stream:
data = yaml.load(stream)
self.data.result = data
return
@staticmethod
def auto_file_output(self):
self.check_path()
network_dict = self.data.result
with open(self._path, 'w') as stream:
yaml.dump(network_dict, stream, default_flow_style=False)
return
@staticmethod
def get_valid_extensions(cls):
return [".yaml"]
class EIADict(Structure):
'''Structure for storing environmental recommendations'''
def get_data(self, raw, meta_data):
return raw
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_output(self):
SimpleDict.auto_file_output(self)
return
@staticmethod
def get_valid_extensions(cls):
return SimpleDict.get_valid_extensions(cls)
class RecommendationDict(Structure):
'''Structure for storing environmental recommendations'''
def get_data(self, raw, meta_data):
return raw
def get_value(self, data):
return deepcopy(data)
@staticmethod
def auto_file_output(self):
SimpleDict.auto_file_output(self)
return
@staticmethod
def get_valid_extensions(cls):
return SimpleDict.get_valid_extensions(cls)
| gpl-3.0 |
chusine/dlnd | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
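
    Example (hedged sketch; assumes an MNIST-style dataset object and
    TF variables created elsewhere):

        weights = [
            tf.Variable(tf.truncated_normal([784, 256], stddev=0.1)),
            tf.Variable(tf.truncated_normal([256, 128], stddev=0.1)),
            tf.Variable(tf.truncated_normal([128, 10], stddev=0.1))]
        compare_init_weights(mnist, 'Truncated normal', [(weights, 'tn')])

    Each entry in weight_init_list is a (weights, label) tuple.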
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many inital weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
Barmaley-exe/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
ycaihua/scikit-learn | sklearn/__init__.py | 5 | 2955 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
#
__version__ = '0.16.dev'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search', 'hmm',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
samzhang111/scikit-learn | sklearn/decomposition/pca.py | 9 | 23163 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
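
        Notes
        -----
        Informal sketch of the identity used below (matrix inversion lemma),
        with W = components_, D = diag(explained variance minus noise) and
        s2 = noise_variance_:

            inv(W.T D W + s2 I) = I/s2 - W.T inv(inv(D) + W W.T/s2) W / s2**2

        which is what the code computes term by term.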
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 2 by default.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
        If n_components is not set then all components are stored and the sum
        of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
        X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
Luftzig/sound-images | soundimages.py | 1 | 3365 | """
SoundImages: play sample taken from file
Author: Yoav Luft, yoav.luft _at_ gmail.com
"""
import Nsound as nsound
# import scikits.audiolab as audiolab
import numpy as np
import scipy.ndimage as ndimage
import threading
import matplotlib.pyplot as plt
import scipy.misc
from Queue import Queue
OUTPUT_SAMPLE_RATE = 44100.0
def subsample(img, c0, c1, num=441, tile=100):
x, y = np.linspace(c0[0], c1[0], num), np.linspace(c0[1], c1[1], num)
return np.tile(ndimage.map_coordinates(img, np.vstack((x, y))), tile)
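# Illustrative sketch (not part of the original file): subsample() reads `num`
# pixel values along the straight line from c0 to c1 with
# ndimage.map_coordinates and tiles them `tile` times so the result is long
# enough to play back. Assuming the 512x512 test image used in __main__:
# >>> img = scipy.misc.lena()
# >>> wave = subsample(img, (0, 0), (511, 511), num=441, tile=100)
# >>> wave.shape        # 441 samples repeated 100 times = 1 s at 44.1 kHz
# (44100,)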
def play_sample(img, c0, c1, num=441, tile=100):
    # return the tiled sample so callers can hand it to an audio sink
    return subsample(img, c0, c1, num, tile)
class AudioPlayer:
"""Simple audio player that consumes audio from a queue"""
def __init__(self):
self.snd_buffer = Queue(1)
self.is_running = False
self.player_thread = None
self.output_stream = nsound.AudioPlayback(OUTPUT_SAMPLE_RATE)
def run(self):
self.player_thread = threading.Thread(target=self.play)
self.is_running = True
self.player_thread.daemon = True
self.player_thread.start()
def get_queue(self):
return self.snd_buffer
def play(self):
print 'Play started'
while 1:
print 'Sampling'
sample = self.snd_buffer.get()
print 'Playing sample'
self.output_stream << sample
class Display:
def __init__(self, img):
self.initialize_image_figure(img)
self.last_click = (None, None)
self.press_handler = self.image_figure.canvas.mpl_connect('button_press_event', self.press_handle)
self.move_handler = self.image_figure.canvas.mpl_connect('motion_notify_event', self.move_handle)
self.player = AudioPlayer()
self.player.run()
def initialize_image_figure(self, img):
self.image = img
self.image_figure = plt.figure()
self.axes = self.image_figure.add_subplot(211)
self.axes_image = self.axes.imshow(self.image)
self.line = None
self.subsample = self.image_figure.add_subplot(212)
axes_axis = self.axes.axis()
self.subsample.set_xlim(xmin=axes_axis[0], xmax=axes_axis[1])
def press_handle(self, event):
print 'Click event', event.xdata, event.ydata
self.last_click = (event.xdata, event.ydata)
def move_handle(self, event):
# print 'Move event', event.xdata, event.ydata
if self.last_click[0] is None or self.last_click[1] is None or event.xdata is None or event.ydata is None:
return
print('Last click was: {} event move: {}'.format(self.last_click, (event.xdata, event.ydata)))
if self.line is not None:
self.line[0].set_data([[self.last_click[0], event.xdata], [self.last_click[1], event.ydata]])
else:
self.line = self.axes.plot([self.last_click[0], event.xdata], [self.last_click[1], event.ydata], "ro-")
self.axes.figure.canvas.draw()
subsampled_line = subsample(self.image, self.last_click, (event.xdata, event.ydata), num=441)
self.subsample.lines = []
self.subsample.plot(subsampled_line)
# print("Subsampled line {}".format(subsampled_line))
# self.player.get_queue().put(subsampled_line)
print
if __name__ == '__main__':
lena = scipy.misc.lena()
display = Display(lena)
plt.show()
| gpl-2.0 |
flightgong/scikit-learn | sklearn/utils/testing.py | 1 | 19151 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regexp
except ImportError:
# for Py 2.6
def assert_raises_regexp(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except Exception as e:
error_message = str(e)
if not re.compile(expected_regexp).match(error_message):
raise AssertionError("Error message should match pattern "
"'%s'. '%s' does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("Should have raised %r" %
expected_exception(expected_regexp))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not w[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)"
% (func.__name__, warning_class, w[0]))
# substring will match, the entire message with typo won't
msg = w[0].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if not check_in_message(msg):
raise AssertionError("The message received ('%s') for <%s> is "
"not the one you expected ('%s')"
% (msg, func.__name__, message
))
return result
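# Hedged usage sketch (not part of the original module): assert_warns_message
# both runs the callable and checks the warning text, returning the callable's
# result, e.g.
# >>> def deprecated_add(a, b):
# ...     warnings.warn("deprecated in 0.15", DeprecationWarning)
# ...     return a + b
# >>> assert_warns_message(DeprecationWarning, "deprecated", deprecated_add, 1, 2)
# 3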
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
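# Hedged usage sketch (not part of the original module; the dataset name and
# column arrays below are made up): a test would install the mock, fetch the
# fake data set, then restore the real urlopen:
# >>> install_mldata_mock({'some-dataset': {'data': np.arange(6).reshape(3, 2),
# ...                                       'label': np.arange(3)}})
# >>> # datasets.fetch_mldata('some-dataset', ...) now reads the fake payload
# >>> uninstall_mldata_mock()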
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_other:
estimators = [c for c in estimators if not c[0] in other]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in meta_estimators]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
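# Hedged usage sketch (not part of the original module): listing classifiers
# only and checking that a given estimator is discovered, e.g.
# >>> classifiers = all_estimators(type_filter='classifier')
# >>> names = [name for name, cls in classifiers]
# >>> 'LogisticRegression' in names
# True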
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod in sys.modules.copy().values():
if hasattr(mod, reg):
getattr(mod, reg).clear()
| bsd-3-clause |
JungeAlexander/cocoscore | src/cocoscore/tagger/co_occurrence_score.py | 1 | 46412 | import collections
import copy
import gzip
import itertools
import os
import tempfile
import warnings
from statistics import mean
from statistics import stdev
import numpy as np
import pandas as pd
from sklearn import metrics
from ..ml import cv
from ..ml.distance_scores import constant_distance
from ..ml.distance_scores import polynomial_decay_distance
from ..ml.distance_scores import reciprocal_distance
from ..ml.fasttext_helpers import fasttext_fit_predict_default
from ..ml.tools import get_log_uniform
from ..ml.tools import get_uniform
from ..tools.file_tools import get_file_handle
from .entity_mappers import get_serial_to_taxid_name_mapper
__author__ = 'Alexander Junge ([email protected])'
def get_hyperparameter_distributions(random_seed=None):
"""
:param random_seed: int to seed numpy RandomState to use while initiating parameter distributions to sample from
:return: a dictionary mapping co-occurrence score parameters to distributions to sample parameters from.
"""
if random_seed is None:
seeds = [13, 24, 43, 56, 65, 123, 12, 2]
else:
random_state = np.random.RandomState(random_seed)
seeds = random_state.randint(100000, size=8)
param_dict = {
'document_weight': get_log_uniform(-3, 1, seeds[0]),
'paragraph_weight': get_uniform(0, 20, seeds[1]),
# 'sentence_weight': get_uniform(0, 10, seeds[2]),
'weighting_exponent': get_uniform(0, 1, seeds[3]),
'decay_rate': get_uniform(0.2, 0.8, seeds[4]),
'distance_offset': get_uniform(0, .5, seeds[5]),
'distance_ceiling': get_uniform(0, .5, seeds[6]),
'score_cutoff': get_uniform(0, 1, seeds[7]),
}
return param_dict
def get_previous_hyperparameter_distributions(random_seed=None):
"""
:param random_seed: int to seed numpy RandomState to use while initiating parameter distributions to sample from
:return: a dictionary mapping previous co-occurrence score parameters to distributions to sample parameters from.
"""
if random_seed is None:
        # four seed slots are consumed below (indices 0, 1 and 3)
        seeds = [1, 2, 3, 4]
else:
random_state = np.random.RandomState(random_seed)
seeds = random_state.randint(100000, size=6)
param_dict = {
'document_weight': get_uniform(0, 20, seeds[0]),
'paragraph_weight': get_uniform(0, 20, seeds[1]),
# 'sentence_weight': get_uniform(0, 10, seeds[2]),
'weighting_exponent': get_uniform(0, 1, seeds[3]),
}
return param_dict
def get_entity_pairs(type_name_set, first_type, second_type):
first_type_names = set()
second_type_names = set()
for type_name in type_name_set:
my_type, name = type_name
if my_type == first_type:
first_type_names.add(type_name)
elif my_type == second_type:
second_type_names.add(type_name)
else:
raise ValueError("Encountered unknown type {:d}.".format(my_type))
if first_type != second_type:
return itertools.product(first_type_names, second_type_names)
else:
return itertools.combinations(first_type_names, 2)
def process_current_pmid_score_lines(current_pmid_lines, serial_to_type_entity, first_type, second_type):
return_list = []
pmid = int(current_pmid_lines[0][0])
type_entities = set()
type_entity_to_sentences = collections.defaultdict(set)
type_entity_to_paragraphs = collections.defaultdict(set)
for current_line in current_pmid_lines:
_, paragraph, sentence, _, _, _, my_type, serial = current_line
type_entity = serial_to_type_entity[int(serial)]
assert int(my_type) == type_entity[0]
type_entities.add(type_entity)
type_entity_to_sentences[type_entity].add((int(paragraph), int(sentence)))
type_entity_to_paragraphs[type_entity].add(int(paragraph))
for entity_pair in get_entity_pairs(type_entities, first_type, second_type):
first_type_entity, second_type_entity = entity_pair
assert first_type_entity[0] == first_type
assert second_type_entity[0] == second_type
common_sentences = type_entity_to_sentences[first_type_entity] & type_entity_to_sentences[second_type_entity]
common_paragraphs = type_entity_to_paragraphs[first_type_entity] & type_entity_to_paragraphs[second_type_entity]
entity_key = sorted((first_type_entity[1], second_type_entity[1]))
return_list.append([pmid, *entity_key, common_sentences, common_paragraphs])
return return_list
def load_matches_file(matches_file_path, entities_file, first_type, second_type):
serial_to_type_name = get_serial_to_taxid_name_mapper(entities_file, taxids=(first_type, second_type))
matches_file = get_file_handle(matches_file_path, matches_file_path.endswith('.gz'))
try:
current_pmid_lines = []
for line in matches_file:
# Fields are: pmid, paragraph, sentence, start_match, end_match, matched, type, serial
line_split = line.rstrip().split('\t')
if len(current_pmid_lines) > 0 and line_split[0] != current_pmid_lines[0][0]:
yield process_current_pmid_score_lines(current_pmid_lines, serial_to_type_name, first_type,
second_type)
current_pmid_lines = [line_split]
else:
current_pmid_lines.append(line_split)
if len(current_pmid_lines) > 0:
yield process_current_pmid_score_lines(current_pmid_lines, serial_to_type_name, first_type, second_type)
finally:
matches_file.close()
def load_sentence_score_iterator(score_dict):
for entity_pair, pmid_paragraph_sentence_dict in score_dict.items():
entity_1, entity_2 = entity_pair
pmid_to_paragraphs = collections.defaultdict(set)
pmid_to_sentences = collections.defaultdict(set)
for pmid_paragraph_sentence, _ in pmid_paragraph_sentence_dict.items():
pmid, paragraph, sentence = pmid_paragraph_sentence
pmid_to_sentences[pmid].add((paragraph, sentence))
pmid_to_paragraphs[pmid].add(paragraph)
for pmid in pmid_to_sentences:
yield pmid, entity_1, entity_2, pmid_to_sentences[pmid], pmid_to_paragraphs[pmid]
def load_paragraph_score_iterator(score_dict):
for entity_pair, pmid_paragraph_dict in score_dict.items():
entity_1, entity_2 = entity_pair
pmid_to_paragraphs = collections.defaultdict(set)
for pmid_paragraph, _ in pmid_paragraph_dict.items():
pmid, paragraph = pmid_paragraph
pmid_to_paragraphs[pmid].add(paragraph)
for pmid in pmid_to_paragraphs:
yield pmid, entity_1, entity_2, {}, pmid_to_paragraphs[pmid]
def load_document_score_iterator(score_dict):
for entity_pair, pmid_dict in score_dict.items():
entity_1, entity_2 = entity_pair
for pmid in pmid_dict.keys():
yield pmid, entity_1, entity_2, set(), set()
def get_max_score(scores_dict, pmid, entity_1, entity_2):
if not (entity_1, entity_2) in scores_dict:
return 0.0
else:
scores = [0.0]
for key, score in scores_dict[(entity_1, entity_2)].items():
if isinstance(key, tuple):
match_pmid = key[0] # sentence and paragraphs scores are index with (pmid, paragraph[, sentence])
else:
match_pmid = key # document scores are only index with pmid
if match_pmid == pmid:
scores.append(score)
return max(scores)
def get_weighted_counts(matches_file_path, sentence_scores, paragraph_scores, document_scores,
entities_file, first_type, second_type,
document_weight, paragraph_weight, sentence_weight,
ignore_scores=False, silent=False):
pair_scores = collections.defaultdict(float)
matches_iter = None
if matches_file_path is not None:
matches_iter = load_matches_file(matches_file_path, entities_file, first_type, second_type)
else:
# since document-level co-mentions are a superset of paragraph-level co-mentions which are a superset of
# sentence-level co-mentions, prefer the scores in this order
my_iterator = None
if document_scores is not None:
my_iterator = load_document_score_iterator(document_scores)
elif paragraph_scores is not None:
my_iterator = load_paragraph_score_iterator(paragraph_scores)
elif sentence_scores is not None:
my_iterator = load_sentence_score_iterator(sentence_scores)
if my_iterator is not None:
matches_iter = [my_iterator]
assert matches_iter is not None, \
'No iterator available; matches files and sentence/paragraph/document scores missing?'
for i, document_matches in enumerate(matches_iter):
if i > 0 and i % 100000 == 0 and not silent:
print('Document', i)
for matches in document_matches:
pmid, entity_1, entity_2, sentence_co_mentions, paragraph_co_mentions = matches
if isinstance(sentence_scores, dict) and not ignore_scores:
sentence_score = get_max_score(sentence_scores, pmid, entity_1, entity_2)
else:
# make sure all sentence-level co-mentions are considered as this is not the case when iterating
# over document or paragraph scores
if isinstance(sentence_scores, dict) and (entity_1, entity_2) in sentence_scores:
for pmid_paragraph_sentence in sentence_scores[(entity_1, entity_2)].keys():
if pmid_paragraph_sentence[0] == pmid:
sentence_co_mentions.add(pmid_paragraph_sentence[1:])
if len(sentence_co_mentions) > 0:
sentence_score = 1
else:
sentence_score = 0
if isinstance(paragraph_scores, dict) and not ignore_scores:
paragraph_score = get_max_score(paragraph_scores, pmid, entity_1, entity_2)
else:
# make sure all paragraph-level co-mentions are considered as this is not the case when iterating
# over document scores
if isinstance(paragraph_scores, dict) and (entity_1, entity_2) in paragraph_scores:
for pmid_paragraph in paragraph_scores[(entity_1, entity_2)].keys():
if pmid_paragraph[0] == pmid:
paragraph_co_mentions.add(pmid_paragraph[1])
if len(paragraph_co_mentions) > 0:
paragraph_score = 1
else:
paragraph_score = 0
if isinstance(document_scores, dict) and not ignore_scores:
document_score = get_max_score(document_scores, pmid, entity_1, entity_2)
else:
document_score = 1
pair_score_update = sentence_score * sentence_weight + paragraph_score * paragraph_weight + \
document_score * document_weight
# skip zero scores since they could lead to ZeroDivisionErrors later on when computing final scores
if pair_score_update > 0:
pair_scores[(entity_1, entity_2)] += pair_score_update
pair_scores[entity_1] += pair_score_update
pair_scores[entity_2] += pair_score_update
pair_scores[None] += pair_score_update
return dict(pair_scores)
def load_score_file(score_file_path, cutoff=0.0):
compression = score_file_path.endswith('.gz')
score_file = get_file_handle(score_file_path, compression)
score_dict = collections.defaultdict(dict)
try:
for line in score_file:
pmid, paragraph, sentence, entity_1, entity_2, score = line.rstrip().split('\t')
entity_key = tuple(sorted((entity_1, entity_2)))
if sentence != '-1': # sentence-level score
score_key = (int(pmid), int(paragraph), int(sentence))
elif sentence == '-1' and paragraph != '-1': # paragraph-level score
score_key = (int(pmid), int(paragraph))
else: # document-level score
score_key = int(pmid)
if cutoff <= float(score):
score_dict[entity_key][score_key] = float(score)
finally:
score_file.close()
return dict(score_dict)
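# Illustrative sketch (not part of the original module) of the expected score
# file layout: tab-separated pmid, paragraph, sentence, entity1, entity2,
# score, with -1 marking paragraph- or document-level entries. For example,
#   12345   1    2   A   B   0.9  ->  score_dict[('A', 'B')][(12345, 1, 2)] == 0.9
#   12345   3   -1   A   B   0.5  ->  score_dict[('A', 'B')][(12345, 3)] == 0.5
#   12345  -1   -1   A   B   0.4  ->  score_dict[('A', 'B')][12345] == 0.4
# where 'A' and 'B' stand for arbitrary entity identifiers.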
def split_scores(score_dict):
sentence_scores = collections.defaultdict(dict)
paragraph_scores = collections.defaultdict(dict)
document_scores = collections.defaultdict(dict)
for entity_pair, match_to_score in score_dict.items():
for match, score in match_to_score.items():
if isinstance(match, tuple):
assert 1 < len(match) < 4, 'Unknown match length.'
if len(match) == 3: # sentence-level match
sentence_scores[entity_pair][match] = score
else: # paragraph-level match
paragraph_scores[entity_pair][match] = score
else:
document_scores[entity_pair][match] = score
sentence_scores.default_factory, paragraph_scores.default_factory, document_scores.default_factory = \
None, None, None
# instead of returning empty dictionaries, return None in such cases
sentence_scores, paragraph_scores, document_scores = (d if len(d) > 0 else None for d
in (sentence_scores, paragraph_scores, document_scores))
return sentence_scores, paragraph_scores, document_scores
def co_occurrence_score(matches_file_path, score_file_path,
entities_file, first_type, second_type,
document_weight=15.0, paragraph_weight=0.0,
sentence_weight=1.0, weighting_exponent=0.6, ignore_scores=False,
silent=False, score_cutoff=0.0):
"""
Computes co-occurrence score for a given matches file and/or sentence score file. See notes from 20170803 for an
explanation compared to DISEASES scoring scheme (as implemented in co_occurrence_score_diseases).
:param matches_file_path: matches file as produced by tagger. Used to define co-occurring terms.
If this is None, co-occurrences are extracted from score_file_path.
    :param score_file_path: score file (tsv formatted) with six columns: pmid, paragraph number,
sentence number, first entity, second entity, sentence score. For document-level scores, set paragraph number and
sentence number to -1. For paragraph-level scores, set sentence number to -1.
:param entities_file: entities file as used by tagger
:param first_type: int, type of the first entity class to be scored
:param second_type: int, type of the second entity class to be scored
:param document_weight: document weight in co-occurrence score
:param paragraph_weight: paragraph weight in the co-occurrence score
:param sentence_weight: sentence weight in the co-occurrence score
:param weighting_exponent: exponent weight in the co-occurrence score
:param ignore_scores: If True, sentence scores are ignored.
:param silent: If True, no progress updates are printed
:param score_cutoff: float, sentences in score_file_path with a score lower than this cutoff
will be ignored.
:return: a dictionary mapping entity pairs to their co-occurrence scores
"""
if matches_file_path is None and score_file_path is None:
raise ValueError('matches_file_path or score_file_path must be specified.')
if score_file_path is not None:
scores = load_score_file(score_file_path, cutoff=score_cutoff)
sentence_scores, paragraph_scores, document_scores = split_scores(scores)
del scores # hint to GC as this may be large
else:
sentence_scores, paragraph_scores, document_scores = None, None, None
co_occurrence_scores = {}
weighted_counts = get_weighted_counts(matches_file_path=matches_file_path, sentence_scores=sentence_scores,
paragraph_scores=paragraph_scores, document_scores=document_scores,
entities_file=entities_file, first_type=first_type, second_type=second_type,
document_weight=document_weight, paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
ignore_scores=ignore_scores, silent=silent)
norm_factor = weighted_counts[None]
for key, score in weighted_counts.items():
if not isinstance(key, tuple):
continue
entity_1, entity_2 = key
tmp = 1 - weighting_exponent
co_occurrence = (score ** weighting_exponent) * \
(((score * norm_factor) / (weighted_counts[entity_1] * weighted_counts[entity_2])) ** tmp)
co_occurrence_scores[key] = co_occurrence
return co_occurrence_scores
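# Worked sketch (illustrative numbers only, not part of the original module):
# with weighting_exponent alpha = 0.6, a weighted pair count c_ij = 10, entity
# totals c_i = 20 and c_j = 40, and overall total c = 800, the formula above
# gives
#   c_ij**alpha * ((c_ij * c) / (c_i * c_j))**(1 - alpha)
#   = 10**0.6 * (8000 / 800)**0.4 = 10**0.6 * 10**0.4 = 10.0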
def co_occurrence_score_diseases(matches_file_path, entities_file, document_weight=3.0, paragraph_weight=0.0,
sentence_weight=0.2,
weighting_exponent=0.6,
silent=False):
return co_occurrence_score(matches_file_path=matches_file_path, score_file_path=None,
entities_file=entities_file,
first_type=9606, second_type=-26,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight, weighting_exponent=weighting_exponent,
ignore_scores=True, silent=silent)
def co_occurrence_score_string(matches_file_path, entities_file, entity_type, document_weight=1.0, paragraph_weight=2.0,
sentence_weight=0.2, weighting_exponent=0.6, silent=False):
return co_occurrence_score(matches_file_path=matches_file_path, score_file_path=None,
entities_file=entities_file,
first_type=entity_type, second_type=entity_type,
document_weight=document_weight, paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight, weighting_exponent=weighting_exponent,
ignore_scores=True, silent=silent)
def _compute_metric(score_dict, data_frame, warn=True, metric='roc_auc_score'):
scores = []
classes = []
for _, group_df in data_frame.groupby(['entity1', 'entity2', 'class']):
if group_df.ndim == 1:
entity1, entity2, _class = group_df.loc[['entity1', 'entity2', 'class']]
else:
entity1, entity2, _class = group_df.iloc[0, :].loc[['entity1', 'entity2', 'class']]
entity_pair = tuple(sorted((entity1, entity2)))
if entity_pair in score_dict:
scores.append(score_dict[entity_pair])
else:
if warn:
warnings.warn('Missing score for entity pair {}.'.format(entity_pair))
scores.append(0.0)
classes.append(_class)
if metric == 'roc_auc_score':
return metrics.roc_auc_score(classes, scores)
elif metric == 'average_precision_score':
return metrics.average_precision_score(classes, scores)
else:
raise ValueError('Unknown scoring metric: {}'.format(metric))
def _get_train_test_scores(train_df, test_df, fasttext_function, fasttext_epochs, fasttext_dim, fasttext_bucket,
match_distance_function, constant_scoring, score_cutoff=0.0):
train_scores = pd.Series([0] * len(train_df), index=train_df.index)
test_scores = pd.Series([0] * len(test_df), index=test_df.index)
sentence_rows_train = np.logical_and(train_df.loc[:, 'sentence'] != -1,
train_df.loc[:, 'paragraph'] != -1)
sentence_rows_test = np.logical_and(test_df.loc[:, 'sentence'] != -1,
test_df.loc[:, 'paragraph'] != -1)
sentence_train_df = train_df.loc[sentence_rows_train, :]
sentence_test_df = test_df.loc[sentence_rows_test, :]
if len(sentence_train_df) > 0:
_, sentence_train_scores, _, sentence_test_scores = fasttext_function(sentence_train_df, sentence_test_df,
epochs=fasttext_epochs,
dim=fasttext_dim,
bucket=fasttext_bucket)
if constant_scoring == 'sentence':
if score_cutoff > 0:
zero_train_score_ix = set((ix for ix, s in enumerate(sentence_train_scores) if s < score_cutoff))
zero_test_score_ix = set((ix for ix, s in enumerate(sentence_test_scores) if s < score_cutoff))
else:
zero_train_score_ix = []
zero_test_score_ix = []
sentence_train_scores = constant_distance(sentence_train_df)
sentence_train_scores = [0 if ix in zero_train_score_ix else s for ix, s in enumerate(sentence_train_scores)]
sentence_test_scores = constant_distance(sentence_test_df)
sentence_test_scores = [0 if ix in zero_test_score_ix else s for ix, s in enumerate(sentence_test_scores)]
else:
sentence_train_scores = [0.0] * len(sentence_train_df)
        sentence_test_scores = [0.0] * len(sentence_test_df)
train_scores[sentence_rows_train] = sentence_train_scores
test_scores[sentence_rows_test] = sentence_test_scores
non_sentence_rows_train = train_df.loc[:, 'sentence'] == -1
non_sentence_rows_test = test_df.loc[:, 'sentence'] == -1
non_sentence_train_df = train_df.loc[non_sentence_rows_train, :]
non_sentence_test_df = test_df.loc[non_sentence_rows_test, :]
non_sentence_train_scores = match_distance_function(non_sentence_train_df)
non_sentence_test_scores = match_distance_function(non_sentence_test_df)
if constant_scoring is not None:
constant_train_scores = constant_distance(non_sentence_train_df)
constant_test_scores = constant_distance(non_sentence_test_df)
paragraph_rows_train = np.logical_and(non_sentence_train_df.loc[:, 'sentence'] == -1,
non_sentence_train_df.loc[:, 'paragraph'] != -1)
paragraph_rows_test = np.logical_and(non_sentence_test_df.loc[:, 'sentence'] == -1,
non_sentence_test_df.loc[:, 'paragraph'] != -1)
document_rows_train = np.logical_not(paragraph_rows_train)
document_rows_test = np.logical_not(paragraph_rows_test)
if constant_scoring == 'paragraph':
non_sentence_train_scores[paragraph_rows_train] = constant_train_scores[paragraph_rows_train]
non_sentence_test_scores[paragraph_rows_test] = constant_test_scores[paragraph_rows_test]
elif constant_scoring == 'document':
non_sentence_train_scores[document_rows_train] = constant_train_scores[document_rows_train]
non_sentence_test_scores[document_rows_test] = constant_test_scores[document_rows_test]
elif constant_scoring == 'sentence':
pass # already handled earlier when computing sentence-level scores
else:
raise ValueError('Unknown constant_scoring parameter: {}'.format(constant_scoring))
train_scores[non_sentence_rows_train] = non_sentence_train_scores
test_scores[non_sentence_rows_test] = non_sentence_test_scores
return train_scores, test_scores
def cv_independent_associations(data_df,
param_dict,
fasttext_epochs=50,
fasttext_dim=300,
fasttext_bucket=2000000,
fasttext_threads=1,
match_distance_function=reciprocal_distance,
constant_scoring=None,
cv_folds=5,
entity_columns=('entity1', 'entity2'),
random_state=None,
warn_missing_scores=True,
metric='roc_auc_score',
pretrained_vectors_path=None,
):
"""
A wrapper around `cv_independent_associations()` in `ml/cv.py` that computes co-occurrences scores for each
CV fold and returns training and validation AUROC for each fold, mean and standard variation of
AUROC across folds along with various other dataset statistics.
:param data_df: the DataFrame to be split into CV folds
:param param_dict: dictionary mapping co-occurrence score hyperparameters to their values
:param fasttext_epochs: int, number of fasttext epochs to perform. This is primarily used for testing and should
not be changed in production.
:param fasttext_dim: int, fasttext vector dimensionality. This is primarily used for testing and should
not be changed in production.
:param fasttext_bucket: int, number of fasttext buckets. This is primarily used for testing and should
not be changed in production.
:param fasttext_threads: int, number of threads to be used by fasttext.
:param match_distance_function: function to score match distances. Takes a pandas DataFrame loaded using
tools.data_tools.load_data_frame(..., match_distance=True). Returns a pandas Series of distance scores.
:param constant_scoring: str - either 'sentence', 'paragraph' or 'document'. Indicates whether a constant scoring
function is to be used for sentence-, paragraph- or document-level co-mentions.
If None (default), match_distance_function will be used to
score both paragraph- and document-level co-mentions and fastText scores to score sentence-level co-mentions.
:param cv_folds: int, the number of CV folds to generate
:param entity_columns: tuple of str, column names in data_df where interacting entities can be found
:param random_state: numpy RandomState to use while splitting into folds
:param warn_missing_scores: boolean: if warnings should be issues during AUROC computation
:param metric: performance metric used for evaluation - can be either 'roc_auc_score' (the default) or
'average_precision_score'
:param pretrained_vectors_path: path to pre-trained word embeddings
:return: a pandas DataFrame with cross validation results
"""
cv_sets = list(cv.cv_independent_associations(data_df, cv_folds=cv_folds, random_state=random_state,
entity_columns=entity_columns))
cv_stats_df = cv.compute_cv_fold_stats(data_df, cv_sets)
def ffpf(train, valid, epochs, dim, bucket):
return fasttext_fit_predict_default(train, valid, epochs=epochs,
dim=dim, bucket=bucket,
pretrained_vectors_path=pretrained_vectors_path,
thread=fasttext_threads
)
param_dict = copy.deepcopy(param_dict)
if 'decay_rate' in param_dict or 'distance_offset' in param_dict or 'distance_ceiling' in param_dict:
decay_rate = param_dict['decay_rate']
del param_dict['decay_rate']
distance_offset = param_dict['distance_offset']
del param_dict['distance_offset']
distance_ceiling = param_dict['distance_ceiling']
del param_dict['distance_ceiling']
def nmdf(data_frame):
return match_distance_function(data_frame, decay_rate, distance_offset, distance_ceiling)
else:
nmdf = match_distance_function
train_performances = []
test_performances = []
for cv_iter, train_test_indices in enumerate(cv_sets):
train_indices, test_indices = train_test_indices
train_df = data_df.iloc[train_indices, :].copy()
test_df = data_df.iloc[test_indices, :].copy()
score_file_path = 'cv_cos_' + str(cv_iter) + '.tsv.gz'
try:
train_performance, test_performance = _get_train_test_performance(train_df=train_df, test_df=test_df,
param_dict=param_dict,
fasttext_function=ffpf,
fasttext_epochs=fasttext_epochs,
fasttext_dim=fasttext_dim,
fasttext_bucket=fasttext_bucket,
match_distance_function=nmdf,
constant_scoring=constant_scoring,
warn_missing_scores=warn_missing_scores,
metric=metric,
tmp_file_path=score_file_path,
)
train_performances.append(train_performance)
test_performances.append(test_performance)
except IOError:
# return missing results if fasttext failed for at least one CV fold
results_df = pd.DataFrame()
results_df['mean_test_score'] = [np.nan]
results_df['stdev_test_score'] = [np.nan]
results_df['mean_train_score'] = [np.nan]
results_df['stdev_train_score'] = [np.nan]
for stats_row in cv_stats_df.itertuples():
cv_fold = str(stats_row.fold)
results_df['split_' + cv_fold + '_test_score'] = [np.nan]
results_df['split_' + cv_fold + '_train_score'] = [np.nan]
results_df['split_' + cv_fold + '_n_test'] = [np.nan]
results_df['split_' + cv_fold + '_pos_test'] = [np.nan]
results_df['split_' + cv_fold + '_n_train'] = [np.nan]
results_df['split_' + cv_fold + '_pos_train'] = [np.nan]
return results_df
# aggregate performance measures and fold statistics in result DataFrame
results_df = pd.DataFrame()
results_df['mean_test_score'] = [mean(test_performances)]
results_df['stdev_test_score'] = [stdev(test_performances)]
results_df['mean_train_score'] = [mean(train_performances)]
results_df['stdev_train_score'] = [stdev(train_performances)]
for stats_row in cv_stats_df.itertuples():
cv_fold = str(stats_row.fold)
results_df['split_' + cv_fold + '_test_score'] = [test_performances[int(cv_fold)]]
        results_df['split_' + cv_fold + '_train_score'] = [train_performances[int(cv_fold)]]
results_df['split_' + cv_fold + '_n_test'] = [stats_row.n_test]
results_df['split_' + cv_fold + '_pos_test'] = [stats_row.pos_test]
results_df['split_' + cv_fold + '_n_train'] = [stats_row.n_train]
results_df['split_' + cv_fold + '_pos_train'] = [stats_row.pos_train]
return results_df
def _get_cocoscores(train_df, test_df, param_dict, fasttext_function, fasttext_epochs,
fasttext_dim, fasttext_bucket,
match_distance_function,
constant_scoring, tmp_file_path=None, keep_scores_file=False):
if tmp_file_path is None:
_, tmp_file_path = tempfile.mkstemp(text=True, suffix='.gz')
try:
if 'score_cutoff' in param_dict:
score_cutoff = param_dict['score_cutoff']
else:
score_cutoff = 0.0
train_scores, test_scores = _get_train_test_scores(train_df, test_df, fasttext_function, fasttext_epochs,
fasttext_dim, fasttext_bucket,
match_distance_function,
constant_scoring, score_cutoff=score_cutoff)
train_df['predicted'] = train_scores
test_df['predicted'] = test_scores
# write combined score file for sentences/documents/paragraphs and evaluate training and validation AUROC
cv_df = pd.concat([train_df, test_df], axis=0)
cv_df['predicted'] = cv_df['predicted'].astype(np.float32)
with gzip.open(tmp_file_path, 'wt') as test_out:
cv_df.to_csv(test_out, sep='\t', header=False, index=False,
columns=['pmid', 'paragraph', 'sentence', 'entity1', 'entity2', 'predicted'])
score_dict = co_occurrence_score(matches_file_path=None,
score_file_path=tmp_file_path,
entities_file=None,
first_type=0,
second_type=0,
ignore_scores=False,
silent=True,
**param_dict,
)
return score_dict
except IOError as e:
raise e
finally:
# print(tmp_file_path)
if not keep_scores_file and os.path.isfile(tmp_file_path):
os.remove(tmp_file_path)
def _get_train_test_performance(train_df, test_df, param_dict, fasttext_function, fasttext_epochs,
fasttext_dim, fasttext_bucket,
match_distance_function,
constant_scoring, warn_missing_scores, metric, tmp_file_path=None,):
score_dict = _get_cocoscores(train_df=train_df, test_df=test_df, param_dict=param_dict,
fasttext_function=fasttext_function, fasttext_epochs=fasttext_epochs,
fasttext_dim=fasttext_dim, fasttext_bucket=fasttext_bucket,
match_distance_function=match_distance_function,
constant_scoring=constant_scoring, tmp_file_path=tmp_file_path)
train_performance = _compute_metric(score_dict, train_df, warn=warn_missing_scores, metric=metric)
test_performance = _compute_metric(score_dict, test_df, warn=warn_missing_scores, metric=metric)
return train_performance, test_performance
def fit_score_default(train_df, test_df, fasttext_epochs=50, fasttext_dim=300,
fasttext_bucket=2000000, pretrained_vectors_path=None, thread=1, output_model_path=None,
output_score_path=None, output_sentence_score_path=None):
"""
Fit a CoCoScore model, using default parameters, to the given training data and
predict scores for the training and test sets.
:param train_df: pandas DataFrame holding the training data
:param test_df: pandas DataFrame holding the test data
:param fasttext_epochs: int, number of fasttext epochs to perform. This is primarily used for testing and should
not be changed in production.
:param fasttext_dim: int, fasttext vector dimensionality. This is primarily used for testing and should
not be changed in production.
:param fasttext_bucket: int, number of fasttext buckets. This is primarily used for testing and should
not be changed in production.
:param pretrained_vectors_path: path to pre-trained word embeddings
:param thread: int, the number of threads to be used by fasttext
:param output_model_path: str, path to save the fitted fasttext model to. If None, the model is not saved.
:param output_score_path: str, path to save the co-mention scores to. If None, the scores are not saved.
:param output_sentence_score_path: str, path to save the test set sentence scores to. File will be gzipped. If None,
the sentence scores are not saved.
:return: tuple of dictionaries mapping entity pairs in training and test set to their scores
"""
match_distance_function = polynomial_decay_distance
decay_rate = 0.5
distance_offset = 0.02
document_weight = 2.0
weighting_exponent = 0.5
paragraph_weight = 0.0
distance_ceiling = 9999
return _fit_score(train_df=train_df, test_df=test_df,
fasttext_fit_predict_function=fasttext_fit_predict_default,
fasttext_epochs=fasttext_epochs, fasttext_dim=fasttext_dim, fasttext_bucket=fasttext_bucket,
match_distance_function=match_distance_function, decay_rate=decay_rate,
distance_offset=distance_offset, distance_ceiling=distance_ceiling,
document_weight=document_weight,
paragraph_weight=paragraph_weight, weighting_exponent=weighting_exponent,
constant_scoring=None,
pretrained_vectors_path=pretrained_vectors_path,
thread=thread, output_model_path=output_model_path, output_score_path=output_score_path,
output_sentence_score_path=output_sentence_score_path)
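# Hedged usage sketch (not part of the original module): train_df and test_df
# are assumed to carry at least the columns used above (pmid, paragraph,
# sentence, entity1, entity2, class, plus the sentence text consumed by
# fastText). The call returns two dicts keyed by sorted (entity1, entity2)
# tuples, e.g.
# >>> train_scores, test_scores = fit_score_default(train_df, test_df)
# >>> top_pairs = sorted(test_scores, key=test_scores.get, reverse=True)[:10]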
def _get_score_dict(scores, df, warn=True):
pairs = (tuple(sorted([e1, e2])) for e1, e2 in zip(df['entity1'], df['entity2']))
pair_scores = {}
for p in pairs:
if p in scores:
pair_scores[p] = scores[p]
else:
pair_scores[p] = 0.0
if warn:
warnings.warn('Missing score for entity pair {}.'.format(p))
return pair_scores
def _fit_score(train_df, test_df, fasttext_fit_predict_function, fasttext_epochs, fasttext_dim, fasttext_bucket,
match_distance_function, decay_rate, distance_offset, distance_ceiling, document_weight, paragraph_weight,
weighting_exponent, constant_scoring, pretrained_vectors_path=None, thread=1,
output_model_path=None, output_score_path=None, output_sentence_score_path=None):
def mdf(data_frame):
return match_distance_function(data_frame, decay_rate, distance_offset, distance_ceiling)
def ffpf(train, test, epochs, dim, bucket):
return fasttext_fit_predict_function(train, test, epochs=epochs,
dim=dim, bucket=bucket,
pretrained_vectors_path=pretrained_vectors_path,
thread=thread,
output_model_path=output_model_path,
output_sentence_score_path=output_sentence_score_path)
param_dict = {'document_weight': document_weight,
'paragraph_weight': paragraph_weight,
'weighting_exponent': weighting_exponent}
scores = _get_cocoscores(train_df=train_df, test_df=test_df, param_dict=param_dict,
fasttext_function=ffpf, fasttext_epochs=fasttext_epochs,
fasttext_dim=fasttext_dim, fasttext_bucket=fasttext_bucket,
match_distance_function=mdf, constant_scoring=constant_scoring,
tmp_file_path=output_score_path, keep_scores_file=output_score_path is not None)
return _get_score_dict(scores, train_df), _get_score_dict(scores, test_df)
def _previous_scores(train_df, test_df, document_weight, paragraph_weight, sentence_weight, weighting_exponent):
train_df['predicted'] = [1.0] * len(train_df)
test_df['predicted'] = [1.0] * len(test_df)
df = pd.concat([train_df, test_df], axis=0)
_, tmp_file_path = tempfile.mkstemp(text=True, suffix='.gz')
try:
with gzip.open(tmp_file_path, 'wt') as test_out:
df.to_csv(test_out, sep='\t', header=False, index=False,
columns=['pmid', 'paragraph', 'sentence', 'entity1', 'entity2', 'predicted'])
old_scores_dict = co_occurrence_score(matches_file_path=None,
score_file_path=tmp_file_path,
entities_file=None,
first_type=0,
second_type=0,
document_weight=document_weight,
paragraph_weight=paragraph_weight,
sentence_weight=sentence_weight,
weighting_exponent=weighting_exponent,
ignore_scores=True,
silent=True)
return _get_score_dict(old_scores_dict, train_df), _get_score_dict(old_scores_dict, test_df)
finally:
if os.path.isfile(tmp_file_path):
os.remove(tmp_file_path)
def previous_scores_default(train_df, test_df):
"""
Compute co-occurrence scores based on previous score model used e.g. in STRING v10.
:param train_df: pandas DataFrame holding the training data
:param test_df: pandas DataFrame holding the test data
:return: tuple of dictionaries mapping entity pairs in training and test set to their scores
"""
return _previous_scores(train_df, test_df, document_weight=1.0, paragraph_weight=2.0, sentence_weight=0.2,
weighting_exponent=0.6)
def previous_scores_cv_independent_associations(data_df,
param_dict,
cv_folds=5,
entity_columns=('entity1', 'entity2'),
random_state=None,
warn_missing_scores=True,
metric='roc_auc_score'):
"""
    A wrapper around `cv_independent_associations()` in `ml/cv.py` that computes previous co-occurrence scores for each
    CV fold and returns training and validation AUROC for each fold, the mean and standard deviation of
    AUROC across folds, along with various other dataset statistics.
:param data_df: the DataFrame to be split into CV folds
:param param_dict: dictionary mapping co-occurrence score hyperparameters to their values
:param cv_folds: int, the number of CV folds to generate
:param entity_columns: tuple of str, column names in data_df where interacting entities can be found
:param random_state: numpy RandomState to use while splitting into folds
    :param warn_missing_scores: boolean: whether warnings should be issued during AUROC computation
:param metric: performance metric used for evaluation - can be either 'roc_auc_score' (the default) or
'average_precision_score'
:return: a pandas DataFrame with cross validation results
"""
cv_sets = list(cv.cv_independent_associations(data_df, cv_folds=cv_folds, random_state=random_state,
entity_columns=entity_columns))
cv_stats_df = cv.compute_cv_fold_stats(data_df, cv_sets)
train_performances = []
test_performances = []
for cv_iter, train_test_indices in enumerate(cv_sets):
train_indices, test_indices = train_test_indices
train_df = data_df.iloc[train_indices, :].copy()
test_df = data_df.iloc[test_indices, :].copy()
try:
train_scores, test_scores = _previous_scores(train_df=train_df, test_df=test_df,
**param_dict)
train_performance = _compute_metric(train_scores, train_df, warn=warn_missing_scores, metric=metric)
test_performance = _compute_metric(test_scores, test_df, warn=warn_missing_scores, metric=metric)
train_performances.append(train_performance)
test_performances.append(test_performance)
except IOError:
# return missing results if fasttext failed for at least one CV fold
results_df = pd.DataFrame()
results_df['mean_test_score'] = [np.nan]
results_df['stdev_test_score'] = [np.nan]
results_df['mean_train_score'] = [np.nan]
results_df['stdev_train_score'] = [np.nan]
for stats_row in cv_stats_df.itertuples():
cv_fold = str(stats_row.fold)
results_df['split_' + cv_fold + '_test_score'] = [np.nan]
results_df['split_' + cv_fold + '_train_score'] = [np.nan]
results_df['split_' + cv_fold + '_n_test'] = [np.nan]
results_df['split_' + cv_fold + '_pos_test'] = [np.nan]
results_df['split_' + cv_fold + '_n_train'] = [np.nan]
results_df['split_' + cv_fold + '_pos_train'] = [np.nan]
return results_df
# aggregate performance measures and fold statistics in result DataFrame
results_df = pd.DataFrame()
results_df['mean_test_score'] = [mean(test_performances)]
results_df['stdev_test_score'] = [stdev(test_performances)]
results_df['mean_train_score'] = [mean(train_performances)]
results_df['stdev_train_score'] = [stdev(train_performances)]
for stats_row in cv_stats_df.itertuples():
cv_fold = str(stats_row.fold)
results_df['split_' + cv_fold + '_test_score'] = [test_performances[int(cv_fold)]]
        results_df['split_' + cv_fold + '_train_score'] = [train_performances[int(cv_fold)]]
results_df['split_' + cv_fold + '_n_test'] = [stats_row.n_test]
results_df['split_' + cv_fold + '_pos_test'] = [stats_row.pos_test]
results_df['split_' + cv_fold + '_n_train'] = [stats_row.n_train]
results_df['split_' + cv_fold + '_pos_train'] = [stats_row.pos_train]
return results_df
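# Hypothetical usage sketch (not part of the original source). param_dict must
# carry the keyword arguments of _previous_scores other than the two DataFrames:
#
#   param_dict = {'document_weight': 1.0, 'paragraph_weight': 2.0,
#                 'sentence_weight': 0.2, 'weighting_exponent': 0.6}
#   cv_results = previous_scores_cv_independent_associations(data_df, param_dict,
#                                                             cv_folds=5)
#   cv_results['mean_test_score']  # mean AUROC across folds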
| mit |
evgchz/scikit-learn | sklearn/manifold/locally_linear.py | 15 | 24841 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
    We estimate the weights to assign to each point in Z[i] to recover
    the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
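# Minimal sketch (not part of the original scikit-learn source): reconstruct each
# point from its two neighbours; the rows of the returned weight matrix sum to one.
#
#   >>> X = np.array([[0., 0.], [1., 0.], [0., 1.]])
#   >>> Z = X[[[1, 2], [0, 2], [0, 1]]]   # neighbours of each row of X
#   >>> W = barycenter_weights(X, Z)
#   >>> np.allclose(W.sum(axis=1), 1.0)
#   True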
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
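# Minimal sketch (not part of the original scikit-learn source): the returned CSR
# matrix holds one row of barycenter weights per sample, so each row sums to one.
#
#   >>> X = np.random.RandomState(0).rand(10, 3)
#   >>> A = barycenter_kneighbors_graph(X, n_neighbors=3)
#   >>> A.shape
#   (10, 10)
#   >>> np.allclose(A.sum(axis=1), 1.0)
#   True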
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
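# Minimal sketch (not part of the original scikit-learn source): for a symmetric
# positive semi-definite M, the k eigenvectors with the smallest eigenvalues are
# returned (after skipping k_skip of them) together with the sum of those eigenvalues.
#
#   >>> M = np.diag([0., 1e-3, 1., 2.])
#   >>> vecs, err = null_space(M, k=1, k_skip=1, eigen_solver='dense')
#   >>> vecs.shape
#   (4, 1)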
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # M = (I - W).T * (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
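# Illustrative sketch (not part of the original scikit-learn source):
#
#   >>> from sklearn.datasets import make_swiss_roll
#   >>> X, _ = make_swiss_roll(n_samples=200, random_state=0)
#   >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
#   >>> Y.shape
#   (200, 2)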
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
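# Illustrative sketch (not part of the original scikit-learn source):
#
#   >>> from sklearn.manifold import LocallyLinearEmbedding
#   >>> X = np.random.RandomState(0).rand(100, 5)
#   >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#   >>> X_embedded = lle.fit_transform(X)
#   >>> X_embedded.shape
#   (100, 2)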
| bsd-3-clause |
DeveloperJose/Vision-Rat-Brain | auto_encoder/test_model.py | 1 | 8222 | import numpy as np
from keras.models import Model
from keras.datasets import mnist
from keras.models import load_model
from sklearn.metrics import label_ranking_average_precision_score
import time
import cv2
t0 = time.time()
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
sw_data = np.load('atlas_sw.npz')
x_train = sw_data['images'].astype('float32') / 255.
x_shape = (x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_train = np.reshape(x_train, x_shape)
y_train = sw_data['labels']
print("X_Train: ", x_train.shape)
print("===--- Paxinos/Watson Atlas")
pw_data = np.load('atlas_pw.npz')
pw_y = pw_data['labels']
pw_im = pw_data['images'].astype('float32') / 255.
pw_shape = pw_im.shape[0], pw_im.shape[1], pw_im.shape[2], 1
pw_im = np.reshape(pw_im, pw_shape)
x_test = np.array([pw_im[7], pw_im[10], pw_im[26], pw_im[39]])
y_test = np.array([pw_y[7], pw_y[10], pw_y[26], pw_y[39]])
x_test = pw_im
y_test = pw_y
print("X_Test: ", x_test.shape)
noise_factor = 0.4
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
t1 = time.time()
print('Dataset loaded in: ', t1-t0)
print('Loading model :')
t0 = time.time()
autoencoder = load_model('autoencoder.h5')
encoder = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('encoder').output)
t1 = time.time()
print('Model loaded in: ', t1-t0)
scores = []
def retrieve_closest_elements(test_code, test_label, learned_codes):
distances = []
for code in learned_codes:
distance = np.linalg.norm(code - test_code)
distances.append(distance)
nb_elements = learned_codes.shape[0]
distances = np.array(distances)
learned_code_index = np.arange(nb_elements)
labels = np.copy(y_train).astype('float32')
labels[labels != test_label] = -1
labels[labels == test_label] = 1
labels[labels == -1] = 0
distance_with_labels = np.stack((distances, labels, learned_code_index), axis=-1)
sorted_distance_with_labels = distance_with_labels[distance_with_labels[:, 0].argsort()]
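    # Turn distances into similarity-style scores (larger means closer). The
    # constant 28 appears to be inherited from the MNIST-based tutorial this
    # script was adapted from and has no special meaning for the atlas images.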
sorted_distances = 28 - sorted_distance_with_labels[:, 0]
sorted_labels = sorted_distance_with_labels[:, 1]
sorted_indexes = sorted_distance_with_labels[:, 2]
return sorted_distances, sorted_labels, sorted_indexes
def compute_average_precision_score(test_codes, test_labels, learned_codes, n_samples):
out_labels = []
out_distances = []
retrieved_elements_indexes = []
for i in range(len(test_codes)):
sorted_distances, sorted_labels, sorted_indexes = retrieve_closest_elements(test_codes[i], test_labels[i], learned_codes)
out_distances.append(sorted_distances[:n_samples])
out_labels.append(sorted_labels[:n_samples])
retrieved_elements_indexes.append(sorted_indexes[:n_samples])
out_labels = np.array(out_labels)
out_labels_file_name = 'computed_data/out_labels_{}'.format(n_samples)
np.save(out_labels_file_name, out_labels)
out_distances_file_name = 'computed_data/out_distances_{}'.format(n_samples)
out_distances = np.array(out_distances)
np.save(out_distances_file_name, out_distances)
score = label_ranking_average_precision_score(out_labels, out_distances)
scores.append(score)
return score
INDEX = 0
def retrieve_closest_images(test_element, test_label, n_samples=10):
global INDEX
learned_codes = encoder.predict(x_train)
learned_codes = learned_codes.reshape(learned_codes.shape[0],
learned_codes.shape[1] * learned_codes.shape[2] * learned_codes.shape[3])
test_code = encoder.predict(np.array([test_element]))
test_code = test_code.reshape(test_code.shape[1] * test_code.shape[2] * test_code.shape[3])
distances = []
for code in learned_codes:
distance = np.linalg.norm(code - test_code)
distances.append(distance)
nb_elements = learned_codes.shape[0]
distances = np.array(distances)
learned_code_index = np.arange(nb_elements)
labels = np.copy(y_train).astype('float32')
labels[labels != test_label] = -1
labels[labels == test_label] = 1
labels[labels == -1] = 0
distance_with_labels = np.stack((distances, labels, learned_code_index), axis=-1)
sorted_distance_with_labels = distance_with_labels[distance_with_labels[:, 0].argsort()]
sorted_distances = 28 - sorted_distance_with_labels[:, 0]
sorted_labels = sorted_distance_with_labels[:, 1]
sorted_indexes = sorted_distance_with_labels[:, 2]
kept_indexes = sorted_indexes[:n_samples]
score = label_ranking_average_precision_score(np.array([sorted_labels[:n_samples]]), np.array([sorted_distances[:n_samples]]))
kept_indexes = kept_indexes.astype(np.uint16)
result_y = y_train[kept_indexes]
result_distances = sorted_distances[kept_indexes]
print("Plate {} - ".format(test_label), end='')
for i in range(n_samples):
match_y = result_y[i]
match_d = result_distances[i]
print("[{},{:.4f}] ".format(match_y, match_d), end='')
print("")
#print("Average precision ranking score for tested element is {}".format(score))
original_image = test_element
#cv2.imshow('original_image_' + str(INDEX), original_image)
retrieved_images = x_train[int(kept_indexes[0]), :]
for i in range(1, n_samples):
retrieved_images = np.hstack((retrieved_images, x_train[int(kept_indexes[i]), :]))
#cv2.imshow('Results_' + str(INDEX), retrieved_images)
cv2.imwrite('test_results/plate_' + str(test_label) + '.jpg', 255 * cv2.resize(original_image, (0,0), fx=3, fy=3))
cv2.imwrite('test_results/results' + str(test_label) + '.jpg', 255 * cv2.resize(retrieved_images, (0,0), fx=2, fy=2))
#import pdb
#pdb.set_trace()
INDEX += 1
return result_y
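# Hypothetical usage sketch (mirrors the loop at the bottom of this script):
#
#   predictions = retrieve_closest_images(x_test[0], y_test[0], n_samples=10)
#   predictions[0]  # label of the closest SW plate found for the first PW plate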
def test_model(n_test_samples, n_train_samples):
learned_codes = encoder.predict(x_train)
learned_codes = learned_codes.reshape(learned_codes.shape[0], learned_codes.shape[1] * learned_codes.shape[2] * learned_codes.shape[3])
test_codes = encoder.predict(x_test)
test_codes = test_codes.reshape(test_codes.shape[0], test_codes.shape[1] * test_codes.shape[2] * test_codes.shape[3])
indexes = np.arange(len(y_test))
np.random.shuffle(indexes)
indexes = indexes[:n_test_samples]
print('Start computing score for {} train samples'.format(n_train_samples))
t1 = time.time()
score = compute_average_precision_score(test_codes[indexes], y_test[indexes], learned_codes, n_train_samples)
t2 = time.time()
print('Score computed in: ', t2-t1)
print('Model score:', score)
def plot_denoised_images():
denoised_images = autoencoder.predict(x_test_noisy.reshape(x_test_noisy.shape[0], x_test_noisy.shape[1], x_test_noisy.shape[2], 1))
test_img = x_test_noisy[0]
resized_test_img = cv2.resize(test_img, (280, 280))
cv2.imshow('input', resized_test_img)
output = denoised_images[0]
resized_output = cv2.resize(output, (280, 280))
cv2.imshow('output', resized_output)
cv2.imwrite('test_results/noisy_image.jpg', 255 * resized_test_img)
cv2.imwrite('test_results/denoised_image.jpg', 255 * resized_output)
# To test the whole model
n_test_samples = 1000
n_train_samples = [10, 50, 100, 200, 300, 400, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
20000, 30000, 40000, 50000, 60000]
#for n_train_sample in n_train_samples:
# test_model(n_test_samples, n_train_sample)
np.save('computed_data/scores', np.array(scores))
import pylab as plt
plt.xkcd()
plt.figure()
plt.title('SW Matching')
plt.xlabel('PW Plate')
plt.ylabel('SW Plate')
# To retrieve closest images
x = []
y = []
for i in range(len(x_test)):
#for i in range(3):
x.append(y_test[i]) # Plate #
predictions = retrieve_closest_images(x_test[i], y_test[i])
y.append(predictions[0]) # Top Prediction
plt.plot(x, y)
plt.savefig('results.png')
plt.show(block=True)
# To plot a denoised image
#plot_denoised_images() | mit |
fyffyt/scikit-learn | sklearn/tests/test_common.py | 70 | 7717 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup; this exercises all the
    # 'configure' functions in the setup.py files of the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators of these types that have
    # a max_iter attribute expose an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |