import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy import stats
from itertools import product
from mytstats import tstatistic
from skbio.stats import composition
from skbio.stats.composition import clr, multiplicative_replacement
__all__ = ['otuLogRatios',
           'ancom',
           'globalLRPermTest',
           'LRPermTest',
           'globalCLRPermTest',
           'CLRPermTest',
           'ratios2otumat',
           'loadAbundance',
           '_dmeanStat',
           '_sumDmeanStat',
           '_maxDmeanStat',
           '_tStat',
           '_sumTStat',
           '_maxTStat']
def _dmeanStat(mat, boolInd, axis=0):
return mat[boolInd,:].mean(axis=axis) - mat[~boolInd,:].mean(axis=axis)
def _sumDmeanStat(mat, boolInd):
return (_dmeanStat(mat, boolInd)**2).sum()
def _maxDmeanStat(mat, boolInd):
return (_dmeanStat(mat, boolInd)**2).max()
def _tStat(mat, boolInd, axis=0):
return tstatistic(mat[boolInd,:], mat[~boolInd,:], axis=axis, equal_var=True)
def _sumTStat(mat, boolInd, axis=0):
return np.abs(_tStat(mat, boolInd)).sum()
def _maxTStat(mat, boolInd, axis=0):
return np.abs(_tStat(mat, boolInd)).max()
def _rhoStat(mat, x, axis=0):
assert mat.shape[axis] == x.shape[0]
if axis == 0:
r = [
stats.spearmanr(x, mat[:, i]).correlation
for i in range(mat.shape[1 - axis])
]
else:
r = [
stats.spearmanr(x, mat[i, :]).correlation
for i in range(mat.shape[1 - axis])
]
r = np.array(r)
assert r.shape[0] == mat.shape[1 - axis], (r.shape[0], mat.shape[1 - axis])
return r
def _sumRhoStat(mat, x):
return (_rhoStat(mat, x)**2).sum()
def _maxRhoStat(mat, x):
return (_rhoStat(mat, x)**2).max()
def loadAbundance(filename, compositionNorm=True, truncate=True):
"""Load OTU counts file (phylum, genus or species level)
with OTUs along the rows and samples along the columns.
Parameters
----------
filename : str
Excel file from QIIME pipeline.
Contains OTUs along the rows and samples along the columns,
with a few header rows.
compositionNorm : bool
Add delta count to zeros and normalize each sample by the
total number of reads. (uses skbio.stats.composition.multiplicative_replacement)
truncate : bool
Discard taxa with less than 0.5% of total reads.
Discard taxa that are not present in 25% of samples.
"""
def _cleanCountDf(df):
"""Drop extra columns/headers and transpose so that
samples are along rows and OTUs along columns.
Returns
-------
outDf : pd.DataFrame [index: samples, columns: OTUs]"""
df = df.drop(['tax_id', 'rank'], axis = 1)
df = df.dropna(subset=['tax_name'], axis = 0)
        df = df.rename(columns={'tax_name': 'OTU'})
df = df.set_index('OTU')
df = df.drop(['specimen'], axis = 0)
df = df.T
df = df.dropna(subset=['label'], axis=0)
df['sid'] = df.label.str.replace('Sample-', 'S')
df = df.set_index('sid')
df = df.drop('label', axis=1)
df = df.astype(float)
return df
def _discardLow(df, thresh=0.005):
"""Discard taxa/columns with less than 0.5% of reads"""
totReads = df.values.sum()
keepInd1 = (df.sum(axis=0)/totReads) > thresh
"""Also discard taxa that are not present in 25% of samples"""
keepInd2 = (df>0).sum(axis=0)/df.shape[0] > 0.25
return df.loc[:, keepInd1 & keepInd2]
df = pd.read_excel(filename)
df = _cleanCountDf(df)
if truncate:
df = _discardLow(df)
if compositionNorm:
values = composition.multiplicative_replacement(df.values)
df = pd.DataFrame(values, columns=df.columns, index=df.index)
cols = [c for c in df.columns if not c in ['sid']]
print('Abundance data: %s samples, %s taxa' % (df.shape[0], len(cols)))
return df, cols
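# A minimal usage sketch (not part of the original module; the filename below is
# hypothetical) showing how loadAbundance is intended to be called. It assumes a
# QIIME-style Excel export with OTUs on rows and samples on columns.
def _example_loadAbundance():
    """Illustrative only."""
    otuDf, otuCols = loadAbundance('genus_counts.xlsx',
                                   compositionNorm=True,
                                   truncate=True)
    # otuDf is [samples x OTUs] with zeros replaced and each sample
    # renormalized to proportions by multiplicative_replacement.
    return otuDf, otuCols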
def ratios2otumat(otuDf, lrvec):
"""Reshape a vector of log-ratios back into a matrix of OTU x OTU
using columns in otuDf
    Example
    -------
    qbyOTU = ratios2otumat(otuDf, qvalues)
    Parameters
    ----------
    otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (columns)
    lrvec : pd.Series [index: (OTU1,OTU2) for each log-ratio]
        Values (e.g. q-values) for each pairwise log-ratio.
    Returns:
    --------
    mat : pd.DataFrame [index: OTUs, columns: OTUs]"""
nSamples, nOTUs = otuDf.shape
otuMat = pd.DataFrame(np.zeros((nOTUs, nOTUs)), columns=otuDf.columns, index=otuDf.columns)
for ind in lrvec.index:
i = np.where(otuDf.columns == ind[0])[0]
j = np.where(otuDf.columns == ind[1])[0]
otuMat.values[i, j] = lrvec[ind]
otuMat.values[j, i] = lrvec[ind]
return otuMat
def otuLogRatios(otuDf):
"""Calculates pairwise log ratios between all OTUs for all samples.
TODO: Use skbio.stats.composition.perturb_inv for simplicity and consistency
(though I think the result will be identical)
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (columns)
Returns:
--------
logRatio : pd.DataFrame [index: (OTU1,OTU2) for each log-ratio]
Log-ratio statistic for each comparison"""
nSamples, nOTUs = otuDf.shape
"""Define minimum OTU abundance to avoid log(0)
multiplicative_replacement takes matrix [samples x OTUs]"""
assert otuDf.min().min() > 0, "Cannot input 0 values to otuLogRatios (min value {})".format(otuDf.min().min())
logOTU = np.log(otuDf).values
nRatios = int(nOTUs * (nOTUs-1) / 2)
logRatio = np.zeros((nSamples, nRatios))
"""List of tuples of two indices for each ratio [nRatios]"""
ratioIndices = [(otui, otuj) for otui in range(nOTUs - 1) for otuj in range(otui+1, nOTUs)]
"""List of indices corresponding to the ratios that contain each OTU"""
otuIndices = [[j for j in range(nRatios) if otui in ratioIndices[j]] for otui in range(nOTUs)]
ratioCount = 0
for otui in range(nOTUs - 1):
tmpCount = int(nOTUs - (otui+1))
logRatio[:, ratioCount:(ratioCount+tmpCount)] = logOTU[:, otui+1:] - logOTU[:, otui][:, None]
ratioCount += tmpCount
cols = [(otuDf.columns[ratioIndices[r][0]], otuDf.columns[ratioIndices[r][1]]) for r in range(nRatios)]
logRatio = pd.DataFrame(logRatio, index=otuDf.index, columns=cols)
return logRatio
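# A small, self-contained sketch (not part of the original module) of otuLogRatios
# on made-up data: three OTUs give 3 pairwise log-ratios per sample, with columns
# labeled by (OTU1, OTU2) tuples.
def _example_otuLogRatios():
    """Illustrative only."""
    toy = pd.DataFrame({'otuA': [0.2, 0.5],
                        'otuB': [0.3, 0.3],
                        'otuC': [0.5, 0.2]},
                       index=['S1', 'S2'])
    lr = otuLogRatios(toy)
    # The ('otuA', 'otuB') column for sample S1 equals
    # np.log(0.3) - np.log(0.2), i.e. log(otuB) - log(otuA).
    return lr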
def globalCLRPermTest(otuDf, labels, statfunc=_sumRhoStat, nperms=999, seed=110820, binary=False):
"""Calculates centered-log-ratios (CLR) for each sample and performs global
permutation tests to determine if there is a significant correlation
over all log-median-ratios, with respect to the label variable of interest.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)
labels: pd.Series (float)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
statfunc : function
Takes a np.ndarray [n x k] and float index [n] as parameters and
returns a float summarizing over k.
nperms : int
Number of iterations for the permutation test.
seed :int
Seed for random permutation generation.
Returns:
--------
pvalue : float
Global p-value for a significant association of OTU log-median-ratios
with label, based on the summary statistic.
obs : float
Statistic summarizing the label difference."""
nSamples, nOTUs = otuDf.shape
if binary:
labelValues = labels.values.astype(bool)
else:
labelValues = labels.values.astype(float)
    # Make proportions (each sample/row sums to 1)
    otuDf = otuDf.div(otuDf.sum(axis=1), axis=0)
# Apply multiplicative replacement for zero values
otuMR = multiplicative_replacement(otuDf.values)
# Calculate the CLR
otuCLR = clr(otuMR)
# Make into a DataFrame
otuCLR = pd.DataFrame(otuCLR, index=otuDf.index, columns=otuDf.columns)
np.random.seed(seed)
obs = statfunc(otuCLR.values, labelValues)
samples = np.array([
statfunc(otuCLR.values, labelValues[np.random.permutation(nSamples)])
for permi in range(nperms)
])
"""Since test is based on the abs statistic it is inherently two-sided"""
pvalue = ((np.abs(samples) >= np.abs(obs)).sum() + 1) / (nperms + 1)
return pvalue, obs
def CLRPermTest(otuDf, labels, statfunc=_rhoStat, nperms=999, adjMethod='fdr_bh', seed=110820, binary=False):
"""Calculates centered-log-ratio (CLR) for all OTUs and performs
permutation tests to determine if there is a significant correlation
in OTU ratios with respect to the label variable of interest.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
Contains relative abundance [0-1] for all samples (rows) and OTUs (colums)
labels: pd.Series (float)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
statfunc : function
Takes a np.array [n x k] and float index [n] as parameters and
returns a 1-D array of the statistic [k].
nperms : int
Number of iterations for the permutation test.
adjMethod : string
Passed to sm.stats.multipletests for p-value multiplicity adjustment.
If value is None then no adjustment is made.
seed :int
Seed for random permutation generation.
Returns:
--------
qvalues : pd.Series [index: OTU]
Q/P-values for each OTU computed.
observed : pd.Series [index: OTU]
Log-ratio statistic summarizing across samples."""
nSamples, nOTUs = otuDf.shape
if binary:
labelValues = labels.values.astype(bool)
else:
labelValues = labels.values.astype(float)
    # Make proportions (each sample/row sums to 1)
    otuDf = otuDf.div(otuDf.sum(axis=1), axis=0)
# Apply multiplicative replacement for zero values
otuMR = multiplicative_replacement(otuDf.values)
# Calculate the CLR
otuCLR = clr(otuMR)
# Make into a DataFrame
otuCLR = pd.DataFrame(otuCLR, index=otuDf.index, columns=otuDf.columns)
obs = statfunc(otuCLR.values, labelValues)
np.random.seed(seed)
samples = np.zeros((nperms, nOTUs))
for permi in range(nperms):
samples[permi, :] = statfunc(
otuCLR.values,
labelValues[np.random.permutation(nSamples)]
)
pvalues = ((np.abs(samples) >= np.abs(obs[None, :])).sum(
axis=0) + 1) / (nperms + 1)
if adjMethod is None or adjMethod.lower() == 'none':
qvalues = pvalues
else:
qvalues = _pvalueAdjust(pvalues, method=adjMethod)
qvalues = pd.Series(qvalues, index=otuDf.columns)
observed = pd.Series(obs, index=otuDf.columns)
return qvalues, observed
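# A brief sketch (not part of the original module; data and names are made up) of
# running the CLR-based tests with a continuous label and a reduced number of
# permutations.
def _example_CLR_tests():
    """Illustrative only."""
    np.random.seed(1)
    toy = pd.DataFrame(np.random.randint(1, 100, size=(10, 4)),
                       index=['S%d' % i for i in range(10)],
                       columns=['otuA', 'otuB', 'otuC', 'otuD'])
    labels = pd.Series(np.random.rand(10), index=toy.index)
    globalP, globalObs = globalCLRPermTest(toy, labels, nperms=99)
    qvalues, observed = CLRPermTest(toy, labels, nperms=99)
    return globalP, qvalues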
def globalLRPermTest(otuDf, labels, statfunc=_sumTStat, nperms=999, seed=110820):
"""Calculates pairwise log ratios between all OTUs and performs global
permutation tests to determine if there is a significant difference
over all log-ratios, with respect to the label variable of interest.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (columns)
labels: pd.Series (bool or int)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
statfunc : function
Takes a np.ndarray [n x k] and boolean index [n] as parameters and
returns a float summarizing over k.
nperms : int
Number of iterations for the permutation test.
    seed : int
Seed for random permutation generation.
Returns:
--------
pvalue : float
Global p-value for a significant association of OTU log-ratios
with label, based on the summary statistic.
obs : float
Statistic summarizing the label difference."""
nSamples, nOTUs = otuDf.shape
# Make sure the label values are binary
    assert labels.unique().shape[0] == 2
    labelBool = labels.values.astype(bool)
logRatio = otuLogRatios(otuDf)
np.random.seed(seed)
samples = np.zeros(nperms)
obs = statfunc(logRatio.values, labelBool)
for permi in range(nperms):
rind = np.random.permutation(nSamples)
samples[permi] = statfunc(logRatio.values, labelBool[rind])
"""Since test is based on the abs statistic it is inherently two-sided"""
pvalue = ((np.abs(samples) >= np.abs(obs)).sum() + 1) / (nperms + 1)
return pvalue, obs
def LRPermTest(otuDf, labels, statfunc=_dmeanStat, nperms=999, adjMethod='fdr_bh', seed=110820):
"""Calculates pairwise log ratios between all OTUs and performs
permutation tests to determine if there is a significant difference
in OTU ratios with respect to the label variable of interest.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (columns)
labels: pd.Series (bool or int)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
statfunc : function
Takes a np.array [n x k] and boolean index [n] as parameters and
returns a 1-D array of the statistic [k].
nperms : int
Number of iterations for the permutation test.
adjMethod : string
Passed to sm.stats.multipletests for p-value multiplicity adjustment.
If value is None then no adjustment is made.
    seed : int
Seed for random permutation generation.
Returns:
--------
qvalues : pd.Series [index: (OTU1,OTU2) for each log-ratio]
Q/P-values for each log-ratio computed. otuQvalues is a reorganization of this.
observed : pd.Series [index: (OTU1,OTU2) for each log-ratio]
Log-ratio statistic summarizing across samples."""
nSamples, nOTUs = otuDf.shape
# Make sure the label values are binary
    assert labels.unique().shape[0] == 2
    labelBool = labels.values.astype(bool)
nRatios = int(nOTUs * (nOTUs-1) / 2)
"""List of tuples of two indices for each ratio [nRatios]"""
ratioIndices = [(otui, otuj) for otui in range(nOTUs - 1) for otuj in range(otui+1, nOTUs)]
"""List of indices corresponding to the ratios that contain each OTU"""
otuIndices = [[j for j in range(nRatios) if otui in ratioIndices[j]] for otui in range(nOTUs)]
logRatio = otuLogRatios(otuDf)
np.random.seed(seed)
samples = np.zeros((nperms, nRatios))
obs = statfunc(logRatio.values, labelBool)
for permi in range(nperms):
rind = np.random.permutation(nSamples)
samples[permi,:] = statfunc(logRatio.values, labelBool[rind])
pvalues = ((np.abs(samples) >= np.abs(obs[None,:])).sum(axis=0) + 1) / (nperms + 1)
if adjMethod is None or adjMethod.lower() == 'none':
qvalues = pvalues
else:
qvalues = _pvalueAdjust(pvalues, method=adjMethod)
cols = [(otuDf.columns[ratioIndices[r][0]], otuDf.columns[ratioIndices[r][1]]) for r in range(nRatios)]
qvalues = pd.Series(qvalues, index=cols)
observed = pd.Series(obs, index=cols)
return qvalues, observed
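# A short sketch (not part of the original module) of the pairwise log-ratio tests
# with a binary label. Zeros are replaced up front because otuLogRatios asserts
# that all values are positive.
def _example_LR_tests():
    """Illustrative only."""
    np.random.seed(2)
    counts = np.random.randint(0, 50, size=(12, 4)).astype(float)
    toy = pd.DataFrame(composition.multiplicative_replacement(counts),
                       index=['S%d' % i for i in range(12)],
                       columns=['otuA', 'otuB', 'otuC', 'otuD'])
    labels = pd.Series([0]*6 + [1]*6, index=toy.index)
    globalP, globalObs = globalLRPermTest(toy, labels, nperms=99)
    qvalues, observed = LRPermTest(toy, labels, nperms=99)
    return globalP, qvalues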
def ancom(otuDf, labels, alpha=0.2, statfunc=_dmeanStat, nperms=0, adjMethod='fdr_bh', seed=110820):
"""Calculates pairwise log ratios between all OTUs and performs
permutation tests to determine if there is a significant difference
in OTU ratios with respect to the label variable of interest.
Algorithm is from:
Mandal, Siddhartha, Will Van Treuren, Richard A White, Merete Eggesbo,
Rob Knight, and Shyamal D Peddada. 2015. "Analysis of Composition
of Microbiomes: A Novel Method for Studying Microbial Composition."
Microbial Ecology in Health and Disease 26: 27663. doi:10.3402/mehd.v26.27663.
Parameters
----------
otuDf : pd.DataFrame [samples x OTUs]
        Contains relative abundance [0-1] for all samples (rows) and OTUs (columns)
labels: pd.Series (bool or int)
Contains binary variable indicating membership into one of two categories
(e.g. treatment conditions). Must share index with otuDf.
alpha : float
Alpha cutoff for rejecting a log-ratio hypothesis.
        If adjMethod is not None, then this is an FDR-adjusted q-value cutoff.
statfunc : function
Takes a np.array [n x k] and boolean index [n] as parameters and
returns a 1-D array of the statistic [k].
nperms : int
Number of iterations for the permutation test.
If nperms = 0, then use Wilcoxon ranksum test to compute pvalue.
adjMethod : string
Passed to sm.stats.multipletests for p-value multiplicity adjustment.
If value is None then no adjustment is made.
    seed : int
Seed for random permutation generation (if nperms > 0)
Returns:
--------
rej : pd.Series [index: OTUs]
Boolean indicating whether the null hypothesis is rejected for each OTU.
otuQvalues : pd.DataFrame [index: OTUs, columns: nOTUs - 1]
Q/P-value for each of the log-ratios for each OTU.
qvalues : pd.Series [index: (OTU1,OTU2) for each log-ratio]
Q/P-values for each log-ratio computed. otuQvalues is a reorganization of this.
    logRatio : pd.DataFrame [index: samples, columns: (OTU1,OTU2) for each log-ratio]
Log-ratio statistic for each comparison"""
nSamples, nOTUs = otuDf.shape
labelBool = labels.values.astype(bool)
nRatios = int(nOTUs * (nOTUs-1) / 2)
"""List of tuples of two indices for each ratio [nRatios]"""
ratioIndices = [(otui, otuj) for otui in range(nOTUs - 1) for otuj in range(otui+1, nOTUs)]
"""List of indices corresponding to the ratios that contain each OTU"""
otuIndices = [[j for j in range(nRatios) if otui in ratioIndices[j]] for otui in range(nOTUs)]
logRatio = otuLogRatios(otuDf)
if nperms > 0:
np.random.seed(seed)
samples = np.zeros((nperms, nRatios))
obs = statfunc(logRatio.values, labelBool)
for permi in range(nperms):
rind = np.random.permutation(nSamples)
samples[permi,:] = statfunc(logRatio.values, labelBool[rind])
pvalues = ((np.abs(samples) >= np.abs(obs[None,:])).sum(axis=0) + 1) / (nperms + 1)
else:
pvalues = np.zeros(nRatios)
for ratioi in range(nRatios):
_, pvalues[ratioi] = stats.ranksums(logRatio.values[labelBool, ratioi], logRatio.values[~labelBool, ratioi])
if adjMethod is None or adjMethod.lower() == 'none':
qvalues = pvalues
else:
qvalues = _pvalueAdjust(pvalues, method=adjMethod)
otuQvalues = np.asarray([qvalues[ind] for ind in otuIndices])
"""Number of hypotheses rejected, for each OTU"""
W = (otuQvalues < alpha).sum(axis=1)
"""Use cutoff of (nOTUs - 1), requiring that all log-ratios are significant for a given OTU (quite conservative)"""
rej = pd.Series(W >= (nOTUs-1), index=otuDf.columns)
otuQvalues = pd.DataFrame(otuQvalues, index=otuDf.columns, columns=['ratio_%d' % i for i in range(nOTUs-1)])
cols = [(otuDf.columns[ratioIndices[r][0]], otuDf.columns[ratioIndices[r][1]]) for r in range(nRatios)]
qvalues = pd.Series(qvalues, index=cols)
return rej, otuQvalues, qvalues, logRatio
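# A minimal sketch (not part of the original module) of ancom on made-up counts
# with a binary label, using the rank-sum branch (nperms=0).
def _example_ancom():
    """Illustrative only."""
    np.random.seed(3)
    counts = np.random.randint(0, 50, size=(12, 5)).astype(float)
    toy = pd.DataFrame(composition.multiplicative_replacement(counts),
                       index=['S%d' % i for i in range(12)],
                       columns=['otu%d' % i for i in range(5)])
    labels = pd.Series([0]*6 + [1]*6, index=toy.index)
    rej, otuQvalues, qvalues, logRatio = ancom(toy, labels, alpha=0.2, nperms=0)
    return rej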
def _pvalueAdjust(pvalues, method='fdr_bh'):
"""Convenience function for doing p-value adjustment.
Accepts any matrix shape and adjusts across the entire matrix.
Ignores nans appropriately.
1) Pvalues can be DataFrame or Series or array
2) Turn it into a one-dimensional vector
    3) Qvalues initialized at p to copy nans in the right places
4) Drop the nans, calculate qvalues, copy to qvalues vector
5) Reshape qvalues
6) Return same type as pvalues"""
p = np.asarray(pvalues).ravel()
qvalues = p.copy()
nanInd = np.isnan(p)
_, q, _, _ = sm.stats.multipletests(p[~nanInd], alpha=0.2, method=method)
qvalues[~nanInd] = q
qvalues = qvalues.reshape(pvalues.shape)
if isinstance(pvalues, pd.core.frame.DataFrame):
return pd.DataFrame(qvalues, columns=[x+'_q' for x in pvalues.columns], index=pvalues.index)
elif isinstance(pvalues, pd.core.series.Series):
return pd.Series(qvalues, name=pvalues.name+'_q', index=pvalues.index)
else:
return qvalues
"""Code for using a different cutoff for W from the ANCOM supplement"""
"""W = np.zeros(n_otu)
for i in range(n_otu):
W[i] = sum(logratio_mat[i,:] < alpha)
par = n_otu-1 #cutoff
c_start = max(W)/par
cutoff = c_start - np.linspace(0.05,0.25,5)
D = 0.02 # Some arbituary constant
dels = np.zeros(len(cutoff))
prop_cut = np.zeros(len(cutoff),dtype=np.float32)
for cut in range(len(cutoff)):
prop_cut[cut] = sum(W > par*cutoff[cut])/float(len(W))
for i in range(len(cutoff)-1):
dels[i] = abs(prop_cut[i]-prop_cut[i+1])
if (dels[1]<D) and (dels[2]<D) and (dels[3]<D):
nu=cutoff[1]
elif (dels[1]>=D) and (dels[2]<D) and (dels[3]<D):
nu=cutoff[2]
elif (dels[2]>=D) and (dels[3]<D) and (dels[4]<D):
nu=cutoff[3]
else:
nu=cutoff[4]
up_point = min(W[W>nu*par])
results = otu_table.columns[W>=nu*par]
return results"""
# tangerine.py
# Contact: Jacob Schreiber
# [email protected]
import sys, os, time, collections, hashlib, uuid, json
import itertools as it
from pomegranate import *
encrypt = lambda x: hashlib.sha512( x ).hexdigest()
def gen():
for i in [1, 2, 3]:
yield i
GENERATOR = gen()
PAGE_LIMIT = 4096
TYPE_NAMES = { 'str' : str,
'float' : float,
'int' : int }
class Page( object ):
"""
This represents a page of tuples. Pages can be read from into RAM, stored
in cache, and written back out to disk.
"""
    def __init__( self, tuples=None, id=None ):
        """
        Input the tuples to be stored together.
        """
        self.id = id or uuid.uuid4().hex
        self.tuples = tuples if tuples is not None else []
        self.n = len( self.tuples )
def __str__( self ):
"""
Return the string representation of the page.
"""
return "\n".join( "\t".join( map( str, tup ) ) for tup in self.tuples )
    def insert( self, tuple ):
        """
        Insert a tuple into the page, filling the first empty slot if one
        exists, otherwise appending to the end.
        """
        for i in xrange( len(self.tuples) ):
            if self.tuples[i] is None:
                self.tuples[i] = tuple
                break
        else:
            self.tuples.append( tuple )
        self.n += 1
def delete( self, filter_expr ):
"""
Take in a dictionary of keys and the value that should be filtered
out. Must be all of them.
"""
for i in xrange( self.n ):
for key, value in filter_expr.items():
if self.tuples[i][key] != value:
break
else:
self.tuples[i] = None
self.n -= 1
def drop( self ):
"""
Drop this page.
"""
os.remove( self.id + '.page' )
del self
def scan( self ):
"""
Return tuples one at a time.
"""
for tup in self.tuples:
yield tup
def commit( self ):
"""
        Commit the page to disk.
"""
data = { 'n' : self.n,
'id' : self.id,
'tuples' : self.tuples }
with open( self.id+'.page', 'w' ) as outfile:
outfile.write( json.dumps( data,
indent=4,
separators=(',', ' : ')))
@classmethod
def load( cls, filename ):
"""
        Load a page from disk.
"""
assert filename.endswith('.page'), "Not a valid page file"
with open( filename, 'r' ) as infile:
data = ''.join( line.strip('\r\n') for line in infile )
data = json.loads( data )
data['tuples'] = map( tuple, data['tuples'])
return cls( data['tuples'], data['id'] )
class Table( object ):
"""
This represents a table in a database. It should store the names and types
of each column, in addition to the data. It will do this as a list of
tuples.
"""
    def __init__( self, name, column_names, types, data=None, page_ids=None, pagestack_ids=None ):
"""
Input the name of the table, name of the columns, types of the columns,
and the pages.
"""
self.name = name
self.column_names = column_names
self.column_name_map = {name: i for i, name in enumerate(column_names)}
self.types = types
self.display_limit = 20
        self.page_ids = page_ids if page_ids is not None else []
        self.pagestack_ids = pagestack_ids if pagestack_ids is not None else []
        if len( self.page_ids ) > 0:
            self.pages = [ Page.load( i+'.page' ) for i in self.page_ids ]
        else:
            self.pages = []
        self.pagestack = [ page for page in self.pages if page.id in self.pagestack_ids ]
self.data = data
if type(self.data) == list and len(self.data) > 0:
self.insert( self.data )
def __str__( self ):
"""
Return a string representation of the data.
"""
width = max( max( map( len, self.column_names ) ), 9 )
header = '| '.join( '{1:{0}} {2:3} '.format( width, cn, ct.__name__ )
for cn, ct in it.izip( self.column_names, self.types ) )
return header + '\n' + '-'*len(header) + '\n' + \
'\n'.join( '| '.join( '{1:{0}} '.format( width+4, str(t) ) for t in tup ) for i, tup in enumerate(self.scan()) if i < self.display_limit ) + '\n'
def scan( self ):
"""
        Yield tuples one at a time, either from pages or from a pending generator.
"""
# If we have a generator representing operations on previous data, as
# opposed to raw data, then go through that generator one at a time.
if type(self.data) == type(GENERATOR):
for tup in self.data:
yield tup
# If we have raw data stored on pages, go through the pages one tuple
# at a time.
else:
for page in self.pages:
for tup in page.scan():
yield tup
def insert( self, data ):
"""
Insert tuples into the table. If a single tuple, then just insert that.
If a list of tuples, then insert all of the tuples into the table.
"""
# If inserting multiple tuples, recursively add them one at a time
# to reduce the amount of code I need to write.
if type(data) == list:
self.insert( data[0] )
if len(data) > 1:
self.insert( data[1:] )
return
# If no unfilled pages in the table, make a new page.
if len( self.pagestack ) == 0:
page_id = uuid.uuid4().hex
page = Page( id=page_id )
self.page_ids.append( page_id )
self.pagestack_ids.append( page_id )
self.pagestack.append( page )
self.pages.append( page )
# Otherwise select the top page.
else:
page = self.pagestack[-1]
# Ensure the tuple is a valid tuple to be inserted
for i, ct in it.izip( data, self.types ):
if type(i) != ct:
print "INSERTION ERROR: Cannot insert data of type" +\
" {} into column of type {}".format( type(i).__name__, ct.__name__ )
break
else:
page.insert( data )
# If the page is now full, remove it.
if sys.getsizeof( page.tuples ) > PAGE_LIMIT:
self.pagestack.pop()
self.pagestack_ids.pop()
def drop( self ):
"""
Drop this table by dropping all the pages.
"""
for page in self.pages:
page.drop()
os.remove( self.name + '.table' )
def _block_join( self, other, self_on, other_on ):
"""
Perform basic block join.
"""
tcolumns = map( self.column_name_map.__getitem__, self_on ) if self_on else []
ocolumns = map( other.column_name_map.__getitem__, other_on ) if other_on else []
        # For each tuple in this table
for x in self.scan():
# For each tuple in the other table
for y in other.scan():
# For each column we are joining on
for tcolumn, ocolumn in it.izip( tcolumns, ocolumns ):
# Make sure that the columns match between tuples
if x[tcolumn] != y[ocolumn]:
break
else:
yield x + y
def _hash_join( self, other, self_on, other_on ):
"""
Perform a hash join, where this table is the one the hash map is
made on.
"""
hash_map = {}
tcolumns = map( self.column_name_map.__getitem__, self_on ) if self_on else []
ocolumns = map( other.column_name_map.__getitem__, other_on ) if other_on else []
# Build a hash map on the self table
hash_col = tcolumns[0]
        for tup in self.scan():
            val = tup[hash_col]
            if val in hash_map:
                hash_map[val].append( tup )
            else:
                hash_map[val] = [ tup ]
        # Run all the tuples from the other table through the hash map
        other_col = ocolumns[0]
        for y in other.scan():
            self_tups = hash_map.get( y[other_col], [] )
# Go through all tuples which match
for x in self_tups:
for tcolumn, ocolumn in it.izip( tcolumns[1:], ocolumns[1:] ):
if x[tcolumn] != y[ocolumn]:
break
else:
yield x + y
def join( self, other_table, self_on=None, other_on=None, algorithm="block" ):
"""
Perform a table join on two tables. Specify the names of the columns
for the join to be on.
"""
this_cns = self.column_names
other_cns = other_table.column_names
new_table_names = [ "{}{}{}".format( self.name, '.' if self.name != '' else '', cn ) for cn in this_cns ] + \
[ "{}.{}".format( other_table.name, cn ) for cn in other_cns ]
new_table_types = self.types + other_table.types
join = self._block_join if algorithm == 'block' or not self_on else self._hash_join
return Table( "",
new_table_names,
new_table_types,
join( other_table, self_on, other_on ) )
def _projection( self, on ):
"""
Internal generator yielding tuples one at a time.
"""
indices = map( self.column_name_map.__getitem__, on )
for tup in self.scan():
yield tuple( tup[i] for i in indices )
def projection( self, on ):
"""
Return the table object removing some of the columns.
"""
if on == '*' or on == ['*']:
return self
indices = map( self.column_name_map.__getitem__, on )
column_names = [ self.column_names[i] for i in indices ]
types = [ self.types[i] for i in indices ]
return Table( "",
column_names,
types,
self._projection( on ) )
def _selection( self, filter_expr ):
"""
Internal generator yielding tuples one at a time.
"""
for tup in self.scan():
if filter_expr( tup, self.column_name_map ):
yield tup
def selection( self, filter_expr ):
"""
Return the table object removing some of the tuples.
"""
return Table( "",
self.column_names,
self.types,
self._selection( filter_expr ) )
def groupby( self, attribute, aggregate, aggregate_attribute=None ):
"""
Group by a particular attribute, given some aggregate. Aggregates include:
        'MIN', 'MAX', 'SUM', 'MEAN', 'COUNT'
"""
        t = float if aggregate != 'COUNT' else int
        summary = collections.defaultdict(t)
        counts = collections.defaultdict(int)
        attr_column = self.column_name_map[attribute]
        aggregate_attribute = aggregate_attribute or attribute
        agg_column = self.column_name_map[aggregate_attribute]
        for tup in self.scan():
            attr_val = tup[attr_column]
            agg_val = tup[agg_column]
            counts[attr_val] += 1
            if aggregate == 'COUNT':
                summary[attr_val] += 1
            elif aggregate == 'MIN':
                # Initialize on first sight so the defaultdict zero doesn't win
                if counts[attr_val] == 1 or summary[attr_val] > agg_val:
                    summary[attr_val] = agg_val
            elif aggregate == 'MAX':
                if counts[attr_val] == 1 or summary[attr_val] < agg_val:
                    summary[attr_val] = agg_val
            elif aggregate == 'SUM' or aggregate == 'MEAN':
                summary[attr_val] += agg_val
        # Divide each group total by its count to get the mean
        if aggregate == 'MEAN':
            for key, val in summary.items():
                summary[key] = val / counts[key]
column_names = [ attribute, "{}({})".format( aggregate, aggregate_attribute ) ]
data = [ ( a, b ) for a, b in summary.items() ]
return Table( "",
column_names,
[ self.types[attr_column], t ],
data )
def commit( self ):
"""
        Commit the table and all of its pages to disk.
"""
for page in self.pages:
page.commit()
data = {
'name' : self.name,
'column_names' : self.column_names,
'types' : [ t.__name__ for t in self.types ],
'page_ids' : self.page_ids,
'pagestack_ids' : self.pagestack_ids,
}
with open( self.name+'.table', 'w' ) as outfile:
outfile.write( json.dumps( data,
indent=4,
separators=(',', ' : ')))
@classmethod
def load( cls, filename ):
"""
Load from a table metainformation file.
"""
assert filename.endswith( '.table' )
with open( filename, 'r' ) as infile:
data = ''.join( line.strip('\r\n') for line in infile )
data = json.loads( data )
data['types'] = [ TYPE_NAMES[t] for t in data['types'] ]
return cls( data['name'], data['column_names'], data['types'],
page_ids=data['page_ids'], pagestack_ids=data['pagestack_ids'] )
def to_csv( self, filename ):
"""
Write the data to a CSV file with headers indicating the name and type.
"""
names = self.column_names
types = self.types
with open( filename, 'w' ) as outfile:
# Make the headers which is a '{name} {type}' pair for each column
            headers = ( "{} {}".format( n, t.__name__ ) for n, t in zip( names, types ) )
# Write the headers out
outfile.write( ",".join( headers ) + "\n" )
# Now write out each tuple
            for tup in self.scan():
outfile.write( ",".join( map( str, tup ) ) + "\n" )
@classmethod
def from_csv( cls, filename ):
"""
Open a csv file with headers indicating the name and the type.
"""
# Get the name of the table from the filename
name = filename.split('\\')[-1].split('.')[0]
# Open the csv file
with open( filename, 'r' ) as infile:
# Get the names and types from
header = infile.readline().strip("\r\n").split(',')
header = [ title.split() for title in header ]
names = tuple([ x[0] for x in header ])
types = tuple([ TYPE_NAMES[x[1]] for x in header ])
data = []
for l in infile:
l = l.strip("\r\n").split(',')
l = [ cast(item) for cast, item in zip( types, l ) ]
data.append( tuple(l) )
return cls( name, names, types, data )
class User( object ):
"""
A user which can access this database.
"""
def __init__( self, username, password ):
self.username = username
self.password = password
def __str__( self ):
'''
JSON string representation of a user.
'''
return json.dumps( { 'username' : self.username,
'password' : self.password },
indent=4,
separators=(',', ' : ') )
def __repr__( self ):
return json.dumps( { 'username' : self.username,
'password' : self.password },
indent=4,
separators=(',', ' : ') )
class Database( object ):
"""
A database object, stores tables and must be 'connected' to.
"""
    def __init__( self, name="", users=None, table_names=None ):
        self.name = name
        self.users = users if users is not None else []
        self.table_names = table_names if table_names is not None else []
self.table_map = {}
self.tables = []
self.fsm = SQLFSM()
def add_user( self, username, password ):
"""
Add a user which can access this database.
"""
user = User( username=encrypt(username),
password=encrypt(password))
self.users.append( user )
def add_table( self, name, column_names, types ):
"""
Add a table to the database.
"""
if name in self.table_map.keys():
raise Warning( "Table {} already in database.".format( name ) )
else:
table = Table( name, column_names, types )
self.table_map[name] = table
self.table_names.append( name )
self.tables.append( table )
def drop_table( self, name ):
"""
Drop a table from the database, removing all its pages as well.
"""
# Call the tables drop method, deleting it and its pages
self.table_map[name].drop()
# Now delete all the metadata associated with that table from the DB
self.table_names.remove( name )
self.tables.remove( self.table_map[name] )
del self.table_map[name]
self.commit()
def connect( self, username, password ):
"""
Attempt to connect to the database.
"""
for user in self.users:
if encrypt(username) == user.username and encrypt(password) == user.password:
self.tables = [ Table.load( name+'.table' ) for name in self.table_names ]
self.table_map = { name : table for name, table in zip( self.table_names, self.tables ) }
break
else:
raise Warning( "Username password/combination does not work." )
def execute( self, query ):
"""
Implement a lite SQL command. Parser is barely functional.
"""
fsm = self.fsm
# Process the query a bit
for item in 'GROUP BY', 'CREATE TABLE', 'INSERT INTO', 'DROP TABLE', 'HASH JOIN':
query = query.replace( item, item.replace(' ', '') )
query = query.split()
# Assign tags to each of the states
try:
tags = [ state.name for i, state in fsm.viterbi( query )[1] if not state.is_silent() ]
except:
return "INVALID QUERY"
# If commiting the database...
if tags[0] == 'commit':
self.commit()
return "DATABASE COMMIT"
# Handle insertions into the database gracefully.
elif tags[0] == 'insertinto':
table_name = None
values = []
str_values = ''
for q, tag in zip( query, tags ):
if tag == 'nkt':
table_name = q
elif tag == 'nkv':
str_values += q.replace('"', '').replace("'", "") + ' '
str_values = str_values.split(',')
for v in str_values:
try:
values.append( int(v) )
except:
try:
values.append( float(v) )
except:
values.append(v.strip(' '))
self.insert( table_name, tuple(values) )
return "DATABASE INSERT"
# If we're creating a table, we need to get the names of the columns
# and the types of those columns, as well as the name of the database
elif tags[0] == 'createtable':
table_name = None
values = ''
for q, tag in zip( query, tags ):
if tag == 'nkt':
table_name = q
elif tag == 'nkv':
values += q + ' '
# Get one long string which is all column names and types
values = values.replace(',', '').split()
column_names = values[::2]
types = map( TYPE_NAMES.__getitem__, values[1::2] )
# Use the internal method to add the table.
self.add_table( table_name, column_names, types )
return "ADD TABLE"
elif tags[0] == 'droptable':
for q, tag in zip( query, tags ):
if tag == 'nkt':
table_name = q
self.drop_table( table_name )
return "DROP TABLE"
# Go through the tags.
elif tags[0] == 'select':
# Initiate variables to store query data
column_names = []
table_names = []
where_clauses = []
groupby = None
limit = 20
join_attrs = []
join_type = 'block'
# Pull the information from the tagged query
for q, tag in zip( query, tags ):
if tag == 'nks':
column_names.append( q.replace(',', '') )
elif tag == 'nkf':
table_names.append( q.replace(',', '') )
elif tag == 'nkw':
where_clauses.append( q.replace(',', '') )
elif tag == 'nkgb':
groupby = q
elif tag == 'nkl':
limit = q
elif tag == 'hashjoin':
join_type = 'hash'
tables = map( self.table_map.__getitem__, table_names )
# Now join all the tables together. At first this may sound
# inefficient, but remember that nothing is being calculated,
# it's just a generator which is being built
t = tables[0]
for table in tables[1:]:
t = t.join( table, algorithm=join_type )
# Now convert the selection criteria into lambda expressions which
# can be evaluated by the table
for i, clause in enumerate( where_clauses ):
split_clause = None
for char in '>=', '<=', '=', '<', '>':
if char in clause:
split_clause = clause.split( char )
split_clause = [ split_clause[0], char, split_clause[1] ]
break
if len( table_names ) == 1:
table = self.table_map[table_names[0]]
else:
table = self.table_map[split_clause[0].split('.')[0]]
split_clause[0] = 'x[y["{}"]]'.format( split_clause[0] )
split_clause[1] = '==' if split_clause[1] == '=' else split_clause[1]
try:
int(split_clause[2])
func = eval( 'lambda x, y: {} {} {}'.format( *split_clause ) )
t = t.selection( func )
except:
split_clause[2] = 'x[y["{}"]]'.format( split_clause[2] )
func = eval( 'lambda x, y: {} {} {}'.format( *split_clause ) )
t = t.selection( func )
t.display_limit = limit
return t.projection( column_names )
def insert( self, table, tuples ):
"""
Insert tuples into the specified table.
"""
self.table_map[table].insert( tuples )
def close( self ):
"""
Close this database connection.
"""
self.commit()
del self
def commit( self ):
"""
Commit the database to a file. Commits all tables as well.
"""
for table in self.tables:
table.commit()
db_json = json.dumps( { 'name' : self.name,
'users' : [ user.__dict__ for user in self.users ],
'table_names' : self.table_names
},
indent=4,
separators = (',', ' : ') )
with open( self.name+'.db', 'w' ) as outfile:
outfile.write( db_json )
@classmethod
def load( cls, filename ):
"""
Load a database object from the file.
"""
assert filename.endswith('.db'), "Not a valid database file."
with open( filename, 'r' ) as infile:
db_json = ''.join( line.strip('\r\n') for line in infile )
db = json.loads( db_json )
db['users'] = [ User( d['username'], d['password'] ) for d in db['users'] ]
return cls( db['name'], db['users'], db['table_names'] )
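# A brief usage sketch of the classes above (illustrative only; the database, table
# and column names are made up). A Database is built through add_user / add_table /
# insert, committed to disk, connected to, and then queried with the lite SQL that
# SQLFSM understands.
def _example_session():
    db = Database( name='example' )
    db.add_user( 'admin', 'hunter2' )
    db.add_table( 'people', ['name', 'age'], [str, int] )
    db.insert( 'people', ('alice', 30) )
    db.insert( 'people', ('bob', 25) )
    db.commit()
    db.connect( 'admin', 'hunter2' )
    return db.execute( 'SELECT name FROM people LIMIT 10' )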
def SQLFSM():
"""
Create and return a simple FSM for tagging parts of the query. I use a HMM
object, but the parameters I feed in make it a FSM.
"""
keywords = ( 'SELECT', 'GROUPBY', 'LIMIT', 'FROM', 'WHERE', 'CREATETABLE',
'CREATEVIEW', 'VALUES', 'INSERTINTO', '(', ')', 'AS', 'AND',
'CONNECTTO', 'DROPTABLE', 'HASHJOIN' )
not_keyword = lambda x: 0 if x not in keywords else float("-inf")
model = HiddenMarkovModel( "SQLParser" )
non_keyword_select = State( LambdaDistribution( not_keyword ), name='nks' )
non_keyword_from = State( LambdaDistribution( not_keyword ), name='nkf' )
non_keyword_where = State( LambdaDistribution( not_keyword ), name='nkw' )
non_keyword_groupby = State( LambdaDistribution( not_keyword ), name='nkgb' )
non_keyword_limit = State( LambdaDistribution( not_keyword ), name='nkl' )
non_keyword_table = State( LambdaDistribution( not_keyword ), name='nkt' )
non_keyword_table_cn = State( LambdaDistribution( not_keyword ), name='nkcn' )
non_keyword_table_ct = State( LambdaDistribution( not_keyword ), name='nkct' )
non_keyword_values = State( LambdaDistribution( not_keyword ), name='nkv' )
select = State( DiscreteDistribution( {'SELECT' : 1.0} ), name='select' )
froms = State( DiscreteDistribution( {'FROM' : 1.0 } ), name='from' )
where = State( DiscreteDistribution( {'WHERE' : 1.0 } ), name='where' )
groupby = State( DiscreteDistribution( {'GROUPBY' : 1.0 } ), name='groupby' )
limit = State( DiscreteDistribution( {'LIMIT' : 1.0 } ), name='limit' )
insertinto = State( DiscreteDistribution( {'INSERTINTO' : 1.0 } ), name='insertinto' )
createtable = State( DiscreteDistribution( {'CREATETABLE' : 1.0 } ), name='createtable' )
values = State( DiscreteDistribution( {'VALUES' : 1.0 } ), name='values' )
left_parens = State( DiscreteDistribution( {'(' : 1.0 } ), name='(' )
right_parens = State( DiscreteDistribution( {')' : 1.0 } ), name=')' )
and_state = State( DiscreteDistribution( {'AND' : 1.0 } ), name='and' )
commit = State( DiscreteDistribution( {'COMMIT' : 1.0 } ), name='commit' )
drop = State( DiscreteDistribution( {'DROPTABLE' : 1.0 } ), name='droptable' )
hashjoin = State( DiscreteDistribution( {'HASHJOIN' : 1.0} ), name='hashjoin' )
model.add_states([ non_keyword_select, non_keyword_from, non_keyword_table,
non_keyword_where, non_keyword_groupby, non_keyword_limit,
non_keyword_table, non_keyword_values,
select, froms, where, groupby, limit,
insertinto, createtable, values, left_parens, right_parens,
and_state, commit, drop, hashjoin ])
model.add_transition( model.start, select, 0.166 )
model.add_transition( select, non_keyword_select, 1.0 )
model.add_transition( non_keyword_select, non_keyword_select, 0.5 )
model.add_transition( non_keyword_select, froms, 0.5 )
model.add_transition( froms, non_keyword_from, 1.0 )
model.add_transition( non_keyword_from, non_keyword_from, 0.166 )
model.add_transition( non_keyword_from, where, 0.166 )
model.add_transition( non_keyword_from, groupby, 0.166 )
model.add_transition( non_keyword_from, limit, 0.166 )
model.add_transition( non_keyword_from, model.end, 0.166 )
model.add_transition( non_keyword_from, hashjoin, 0.17 )
model.add_transition( hashjoin, where, 0.25 )
model.add_transition( hashjoin, groupby, 0.25 )
model.add_transition( hashjoin, limit, 0.25 )
model.add_transition( hashjoin, model.end, 0.25 )
model.add_transition( where, non_keyword_where, 1.0 )
model.add_transition( non_keyword_where, non_keyword_where, 0.20 )
model.add_transition( non_keyword_where, groupby, 0.20 )
model.add_transition( non_keyword_where, limit, 0.20 )
model.add_transition( non_keyword_where, model.end, 0.20 )
model.add_transition( non_keyword_where, and_state, 0.20 )
model.add_transition( and_state, non_keyword_where, 1.0 )
model.add_transition( groupby, non_keyword_groupby, 1.0 )
model.add_transition( non_keyword_groupby, limit, 0.5 )
model.add_transition( non_keyword_groupby, model.end, 0.5 )
model.add_transition( limit, non_keyword_limit, 1.0)
model.add_transition( non_keyword_limit, model.end, 1.0 )
model.add_transition( model.start, insertinto, 0.166 )
model.add_transition( insertinto, non_keyword_table, 1.0 )
model.add_transition( non_keyword_table, left_parens, 0.33 )
model.add_transition( non_keyword_table, values, 0.33 )
model.add_transition( non_keyword_table, model.end, 0.34 )
model.add_transition( values, left_parens, 0.5 )
model.add_transition( left_parens, non_keyword_values, 1.0 )
model.add_transition( non_keyword_values, non_keyword_values, 0.5 )
model.add_transition( non_keyword_values, right_parens, 0.5 )
model.add_transition( right_parens, model.end, 1.0 )
model.add_transition( model.start, createtable, 0.166 )
model.add_transition( createtable, non_keyword_table, 1.0 )
model.add_transition( model.start, commit, 0.166 )
model.add_transition( commit, model.end, 1.0 )
    model.add_transition( model.start, drop, 0.336 )
model.add_transition( drop, non_keyword_table, 1.0 )
model.bake()
return model
def connect( database, username, password ):
"""
Connect to a database, given a username and password.
"""
db = Database.load( database )
try:
db.connect( username, password )
except:
raise Warning( "Incorrect username/password combination." )
else:
return db
if __name__ == '__main__':
# If this is called directly, create a terminal.
    _, db_name, user, pwd = sys.argv
    # Try to connect to the database
    try:
        db = connect( db_name, user, pwd )
except:
print "FATAL ERROR: Incorrect username or password or database."
sys.exit()
    if db_name.endswith('.db'):
        db_name = db_name[:-3]
# Do command line stuff
while True:
query = ''
i = True
while not query.endswith( '; ' ):
query += raw_input( "{}{}# ".format( db_name, '=' if i else '-' ) ).strip(' ') + ' '
i = False
if query.lower() in ('exit; ', 'quit; '):
break
print db.execute( query[:-2] )
'''
try:
result = db.execute( query[:-2] )
print result
except Exception:
print "INVALID QUERY"
'''
# from django.db import models
# from django.utils.timezone import localtime
from datetime import datetime
from dateutil.relativedelta import relativedelta
from django import forms
from django.contrib.gis.db import models
from django.contrib.postgres import fields as pgmodels
from relativedeltafield import RelativeDeltaField
from apps.utils.time import UTC_P0100, format_delta
INTERVALS = {
"years": 29030400, # 60 * 60 * 24 * 7 * 4 * 12
"months": 2419200, # 60 * 60 * 24 * 7 * 4
"weeks": 604800, # 60 * 60 * 24 * 7
"days": 86400, # 60 * 60 * 24
"hours": 3600, # 60 * 60
"minutes": 60,
"seconds": 1,
}
def relative_delta_to_total_seconds(rel_delta):
years = rel_delta.years
months = rel_delta.months
days = rel_delta.days
hours = rel_delta.hours
minutes = rel_delta.minutes
seconds = rel_delta.seconds
total_seconds = years * INTERVALS["years"]
total_seconds += months * INTERVALS["months"]
total_seconds += days * INTERVALS["days"]
total_seconds += hours * INTERVALS["hours"]
total_seconds += minutes * INTERVALS["minutes"]
total_seconds += seconds * INTERVALS["seconds"]
return total_seconds
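# A small worked example (illustrative only) of the conversion above, using the
# module's approximation that a month is 4 weeks and a year is 12 such months:
#
#   relative_delta_to_total_seconds(relativedelta(months=1, days=2, hours=3))
#   == 1 * 2419200 + 2 * 86400 + 3 * 3600
#   == 2602800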
time_series_default_zero = datetime(2000, 1, 1, 1, 00, 00)
time_series_default_zero = time_series_default_zero.replace(tzinfo=UTC_P0100)
def default_relative_delta_zero():
return relativedelta(0)
def default_relative_delta_hour():
return relativedelta(hours=1)
class TimeSlots(models.Model):
name_id = models.CharField(
help_text="Unique and computer-friendly name of time_slots",
max_length=100,
unique=True,
)
name = models.CharField(
help_text="Human-readable name of the time_slots.",
max_length=50
)
zero = models.DateTimeField(
null=False,
default=time_series_default_zero
)
frequency = RelativeDeltaField(
null=False,
default=default_relative_delta_hour
)
range_from = RelativeDeltaField(
null=False,
default=default_relative_delta_zero
)
range_to = RelativeDeltaField(
null=False,
default=default_relative_delta_hour
)
def clean(self):
if self.frequency is None:
raise forms.ValidationError('frequency cannot be null')
if self.range_from is None:
raise forms.ValidationError('range_from cannot be null')
if self.range_to is None:
raise forms.ValidationError('range_to cannot be null')
if relative_delta_to_total_seconds(self.frequency) <= 0:
            raise forms.ValidationError('frequency must be a positive interval')
if relative_delta_to_total_seconds(self.range_to) <= relative_delta_to_total_seconds(self.range_from):
raise forms.ValidationError('range_to must be greater than range_from')
def __str__(self):
return self.name
class Topic(models.Model):
"""Process used to generate the result, e.g. measurement or
hourly average."""
name_id = models.CharField(
help_text="Unique and computer-friendly name of the topic. eg. ('drought')",
max_length=100,
unique=True,
)
name = models.CharField(
help_text="Human-readable name of the topic.",
max_length=50
)
class Meta:
ordering = ['name']
verbose_name_plural = "topics"
def __str__(self):
return self.name
class Process(models.Model):
"""Process used to generate the result, e.g. measurement or
hourly average."""
name_id = models.CharField(
help_text="Unique and computer-friendly name of the process. eg. ('measure' or 'avg_hour')",
max_length=100,
unique=True,
editable=False
)
name = models.CharField(
help_text="Human-readable name of the process.",
max_length=50
)
class Meta:
ordering = ['name']
verbose_name_plural = "processes"
def __str__(self):
return self.name
class Property(models.Model):
"""Physical phenomenon related to weather, e.g. air temperature."""
name_id = models.CharField(
help_text="Unique and computer-friendly name of the property. eg. ('precipitation' or 'air_temperature')",
max_length=30,
unique=True,
editable=False
)
name = models.CharField(
help_text="Human-readable name of the property.",
max_length=30
)
unit = models.CharField(
help_text="Unit of observations (physical unit). Same for all "
"observations of the property.",
max_length=30
)
default_mean = models.ForeignKey(
Process,
null=True,
help_text="Process aggregation used to calculate the result.",
related_name="%(app_label)s_%(class)s_related",
editable=False,
on_delete=models.DO_NOTHING,
)
class Meta:
ordering = ['name']
verbose_name_plural = "properties"
def __str__(self):
return self.name
class AbstractFeature(models.Model):
"""Place where the observation were collected - mostly point feature like weather station."""
id_by_provider = models.CharField(
help_text="ID of the station used by provider.",
max_length=50,
editable=False
)
name = models.CharField(
help_text="Human-readable name of the station.",
max_length=50
)
# https://docs.djangoproject.com/en/1.10/topics/db/models/#field-name-hiding-is-not-permitted
# This field is supposed to be overridden in subclass if needed.
# By default it is set to point geometry (PointField) but could be either changed on other type (LineField)
# or completely removed (set to None)
# https://docs.djangoproject.com/en/1.11/ref/contrib/gis/model-api/#pointfield
#
# Spatial fields defaults to srid=4326 (WGS84)
geometry = models.PointField(
help_text="Spatial information about feature."
)
class Meta:
abstract = True
ordering = ['name']
def __str__(self):
return self.name
class AbstractObservation(models.Model):
"""An observation is an act associated with a discrete time instant or
period through which a quantity is assigned to a phenomenon (Property).
It involves application of a specified procedure (Process), such as a
sensor measurement or algorithm processing (e.g. hourly average)."""
phenomenon_time_range = pgmodels.DateTimeRangeField(
help_text="Datetime range when the observation was captured.",
)
def phenomenon_time_from(self):
return self.phenomenon_time_range.lower
phenomenon_time_from.admin_order_field = 'phenomenon_time_range'
@property
def phenomenon_time_duration(self):
delta = self.phenomenon_time_range.upper - self.phenomenon_time_range.lower
return delta
@property
def phenomenon_time_duration_for_human(self):
return format_delta(self.phenomenon_time_duration)
phenomenon_time_duration_for_human.fget.short_description = "Phenomenon time duration"
# phenomenon_time_to = models.DateTimeField(
# help_text="End of the observation. If the observation was instant, "
# "it is the same time as phenomenon_time.",
# editable=False
# )
observed_property = models.ForeignKey(
Property,
help_text="Phenomenon that was observed, e.g. air temperature.",
related_name="%(app_label)s_%(class)s_related",
editable=False,
on_delete=models.DO_NOTHING,
)
# NOTE: This field has to be overridden in child classes!
# It needs to reference proper ForeignKey (Concrete Feature inherited from AbstractFeature)
feature_of_interest = models.ForeignKey(
AbstractFeature,
help_text="Weather station where the observation was taken.",
related_name="%(app_label)s_%(class)s_related",
editable=False,
on_delete=models.DO_NOTHING,
)
procedure = models.ForeignKey(
Process,
help_text="Process used to generate the result, e.g. measurement or "
"average.",
related_name="%(app_label)s_%(class)s_related",
editable=False,
on_delete=models.DO_NOTHING,
)
related_observations = models.ManyToManyField(
'self',
help_text="Measured observations that were used to generate average "
"observation, or vice versa.",
editable=False,
)
result = models.DecimalField(
help_text="Numerical value of the measured phenomenon in units "
"specified by Process.",
max_digits=8,
decimal_places=3,
null=True,
editable=False,
)
time_slots = models.ForeignKey(
TimeSlots,
help_text="Time_slots used to calc aggregations",
null=True,
default=None,
on_delete=models.DO_NOTHING,
related_name="%(app_label)s_%(class)s_related",
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@property
def result_for_human(self):
if self.result is not None:
res_str = "{}".format(self.result)
else:
reason = self.result_null_reason
res_str = 'unknown because of ' + reason
return res_str
result_for_human.fget.short_description = 'Result'
result_null_reason = models.CharField(
help_text="Reason why result is null.",
max_length=100,
default='',
)
class Meta:
abstract = True
get_latest_by = 'phenomenon_time_range'
ordering = ['-phenomenon_time_range', 'feature_of_interest', 'procedure',
'observed_property']
unique_together = (('phenomenon_time_range',
'observed_property', 'feature_of_interest',
'procedure'),)
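# A hypothetical sketch (not part of this module) of how the NOTE above is meant to
# be satisfied: a concrete feature model plus a concrete observation model that
# overrides feature_of_interest to point at it.
#
# class WeatherStation(AbstractFeature):
#     pass
#
# class StationObservation(AbstractObservation):
#     feature_of_interest = models.ForeignKey(
#         WeatherStation,
#         help_text="Weather station where the observation was taken.",
#         related_name="%(app_label)s_%(class)s_related",
#         editable=False,
#         on_delete=models.DO_NOTHING,
#     )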
#!/usr/bin/env python
#
# Copyright 2014 Knowledge Economy Developments Ltd
# Copyright 2014 - 2016 David Wells
#
# Henry Gomersall
# [email protected]
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
import itertools as it
import random as rand
import unittest
import numpy
import pyfftw
from pyfftw import _supported_types
from .test_pyfftw_base import run_test_suites
discrete_sine_directions = ['FFTW_RODFT00', 'FFTW_RODFT01', 'FFTW_RODFT10',
'FFTW_RODFT11']
discrete_cosine_directions = ['FFTW_REDFT00', 'FFTW_REDFT01', 'FFTW_REDFT10',
'FFTW_REDFT11']
real_transforms = discrete_sine_directions + discrete_cosine_directions
normalisation_lookup = {
'FFTW_RODFT00': lambda n: 2*(n + 1),
'FFTW_RODFT01': lambda n: 2*n,
'FFTW_RODFT10': lambda n: 2*n,
'FFTW_RODFT11': lambda n: 2*n,
'FFTW_REDFT00': lambda n: 2*(n - 1),
'FFTW_REDFT01': lambda n: 2*n,
'FFTW_REDFT10': lambda n: 2*n,
'FFTW_REDFT11': lambda n: 2*n,
}
inverse_lookup = {
'FFTW_RODFT00': 'FFTW_RODFT00',
'FFTW_RODFT01': 'FFTW_RODFT10',
'FFTW_RODFT10': 'FFTW_RODFT01',
'FFTW_RODFT11': 'FFTW_RODFT11',
'FFTW_REDFT00': 'FFTW_REDFT00',
'FFTW_REDFT01': 'FFTW_REDFT10',
'FFTW_REDFT10': 'FFTW_REDFT01',
'FFTW_REDFT11': 'FFTW_REDFT11',
}
interpolated_function_lookup = {
'FFTW_RODFT00': lambda k, x: numpy.sin(numpy.pi*(k + 1)*x),
'FFTW_RODFT01': lambda k, x: numpy.sin(numpy.pi*(k + 0.5)*x),
'FFTW_RODFT10': lambda k, x: numpy.sin(numpy.pi*(k + 1)*x),
'FFTW_RODFT11': lambda k, x: numpy.sin(numpy.pi*(k + 0.5)*x),
'FFTW_REDFT00': lambda k, x: numpy.cos(numpy.pi*k*x),
'FFTW_REDFT10': lambda k, x: numpy.cos(numpy.pi*k*x),
'FFTW_REDFT01': lambda k, x: numpy.cos(numpy.pi*(k + 0.5)*x),
'FFTW_REDFT11': lambda k, x: numpy.cos(numpy.pi*(k + 0.5)*x),
}
nodes_lookup = {
'FFTW_RODFT00': lambda n: numpy.arange(n + 2)[1:-1]/(n + 1),
'FFTW_RODFT01': lambda n: numpy.arange(1, n + 1)/n,
'FFTW_RODFT10': lambda n: (numpy.arange(n) + 0.5)/n,
'FFTW_RODFT11': lambda n: (numpy.arange(n) + 0.5)/n,
'FFTW_REDFT00': lambda n: numpy.arange(n)/(n - 1),
'FFTW_REDFT10': lambda n: (numpy.arange(n) + 0.5)/n,
'FFTW_REDFT01': lambda n: numpy.arange(n)/n,
'FFTW_REDFT11': lambda n: (numpy.arange(n) + 0.5)/n,
}
@unittest.skipIf('64' not in _supported_types, 'double precision unavailable')
class TestRealToRealLookups(unittest.TestCase):
'''Test that the lookup tables correctly pair node choices and
function choices for using the DCT/DST as interpolators.
'''
def test_lookups(self):
n = rand.randint(10, 20)
j = rand.randint(5, n) - 3
for transform in real_transforms:
nodes = nodes_lookup[transform](n)
data = interpolated_function_lookup[transform](j, nodes)
output = numpy.empty_like(data)
plan = pyfftw.FFTW(data, output, direction=[transform])
data[:] = interpolated_function_lookup[transform](j, nodes)
plan.execute()
tol = 4*j*n*1e-16
if transform == 'FFTW_RODFT00':
self.assertTrue(abs(output[j] - n - 1) < tol)
elif transform == 'FFTW_REDFT00':
self.assertTrue(abs(output[j] - n + 1) < tol)
else:
self.assertTrue(abs(output[j] - n) < tol)
class TestRealTransform(object):
'''Common set of functionality for performing tests on the real to
real transforms. This is not implemented as a distinct test class
(inheriting from unittest.TestCase) because its `__init__` method
takes multiple arguments as input which set up the size and
directions of the transform.
'''
def __init__(self, directions=['FFTW_REDFT00'], dims=(16, ), axes=None,
noncontiguous=True, dtype=None):
"""
Arguments:
        - `directions`: List of FFTW real-transform directions, like
          'FFTW_RODFT11' or 'FFTW_REDFT01'.
        - `dims`: Shape of the data.
        - `axes`: Axes along which to take the transform. Defaults to the
          first `len(directions)` axes.
"""
if axes is None:
self.axes = tuple(range(len(directions)))
else:
self.axes = axes
for dim in dims:
if dim < 3:
raise NotImplementedError("Due to complications with the DCT1, "
"arrays must be of length at least "
"three.")
if len(self.axes) != len(directions):
raise ValueError("There must be exactly one axis per direction.")
self.directions = directions
self.inverse_directions = [inverse_lookup[direction]
for direction in directions]
self.dims = dims
self._normalisation_factor = 1.0
for index, axis in enumerate(self.axes):
dim = self.dims[axis]
direction = self.directions[index]
self._normalisation_factor *= normalisation_lookup[direction](dim)
if noncontiguous:
self._input_array = empty_noncontiguous(dims)
self._output_array = empty_noncontiguous(dims)
else:
self._input_array = numpy.zeros(dims)
self._output_array = numpy.zeros(dims)
self.plan = pyfftw.FFTW(self._input_array, self._output_array,
axes=self.axes, direction=self.directions)
self.inverse_plan = pyfftw.FFTW(self._input_array, self._output_array,
axes=self.axes, direction=self.inverse_directions)
self.tol = 1e-10
if dtype is None:
if '64' in _supported_types:
dtype = numpy.float64
elif '32' in _supported_types:
dtype = numpy.float32
self.tol = 1e-5
elif 'ld' in _supported_types:
dtype = numpy.longdouble
self.tol = 1e-14
self.dtype = dtype
def test_normalisation(self):
return self._normalisation_factor == float(self.plan._get_N())
def test_against_random_data(self):
data = numpy.random.rand(*self.dims).astype(self.dtype, copy=False)
self._input_array[:] = data
self.plan.execute()
self._input_array[:] = self._output_array[:]
self.inverse_plan.execute()
data *= self._normalisation_factor
err = numpy.mean(numpy.abs(data - self._output_array))/self._normalisation_factor
return err < self.tol
def test_against_exact_data(self):
points = grid(self.dims, self.axes, self.directions)
data = numpy.ones_like(points[0], dtype=self.dtype)
wavenumbers = list()
factors = list()
for index, axis in enumerate(self.axes):
# Simplification: don't test constant terms. They are weird.
if self.directions[index] in discrete_cosine_directions:
wavenumber_min = 1
wavenumber_max = self.dims[axis] - 2
else:
wavenumber_min = 0
wavenumber_max = self.dims[axis] - 2
_wavenumbers = sorted({rand.randint(wavenumber_min, wavenumber_max)
for _ in range(self.dims[axis])})
_factors = [rand.randint(1, 8) for _ in _wavenumbers]
interpolated_function = interpolated_function_lookup[
self.directions[index]]
data *= sum((factor*interpolated_function(wavenumber, points[axis])
for factor, wavenumber in zip(_factors, _wavenumbers)))
wavenumbers.append(numpy.array(_wavenumbers))
factors.append(numpy.array(_factors))
self._input_array[:] = data
self.plan.execute()
# zero all of the entries that do not correspond to a wavenumber.
exact_coefficients = numpy.ones(data.shape)
for index, axis in enumerate(self.axes):
dim = self.dims[axis]
sp = list(it.repeat(slice(None), len(data.shape)))
            zero_indices = numpy.array(list(set(numpy.arange(0, dim)) -
                                            set(wavenumbers[index])))
            if len(zero_indices) > 0:
                sp[axis] = zero_indices
mask = numpy.ones(data.shape)
mask[tuple(sp)] = 0.0
exact_coefficients *= mask
# create the 'known' array of interpolation coefficients.
normalisation = self.plan.N/(2**len(self.axes))
for index, axis in enumerate(self.axes):
for factor, wavenumber in zip(factors[index], wavenumbers[index]):
sp = list(it.repeat(slice(None), len(data.shape)))
sp[axis] = wavenumber
exact_coefficients[tuple(sp)] *= factor
error = numpy.mean(numpy.abs(self._output_array/normalisation -
exact_coefficients))
return error < self.tol
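# Illustrative sketch (added for documentation; not collected by unittest, and
# it assumes double-precision FFTW support is available when called):
# TestRealTransform can be driven directly for one direction/shape combination.
def _example_manual_testcase():
    '''Sketch only: build a single DCT-II test case and run its checks.'''
    case = TestRealTransform(directions=['FFTW_REDFT10'], dims=(8,))
    return case.test_normalisation() and case.test_against_random_data()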
def meshgrid(*x):
if len(x) == 1:
# necessary for one-dimensional case to work correctly. x is a
# tuple due to the * operator.
return x
else:
args = numpy.atleast_1d(*x)
s0 = (1,)*len(args)
return list(map(numpy.squeeze,
numpy.broadcast_arrays(*[x.reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(args)])))
def grid(shape, axes, directions, aspect_ratio=None):
grids = [numpy.linspace(1, 2, dim) for dim in shape]
for index, (axis, direction) in enumerate(zip(axes, directions)):
grids[axis] = nodes_lookup[direction](shape[axes[index]])
return numpy.array(meshgrid(*grids))
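# Illustrative sketch (added for documentation, not part of the test suite):
# for a 1-D FFTW_REDFT10 transform, grid() returns the DCT-II midpoint nodes
# (k + 0.5)/n on the single axis.
def _example_redft10_nodes(n=8):
    '''Sketch only: check the nodes produced by grid() for FFTW_REDFT10.'''
    points = grid((n,), (0,), ['FFTW_REDFT10'])
    return numpy.allclose(points[0], (numpy.arange(n) + 0.5)/n)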
def empty_noncontiguous(shape):
'''Create a non-contiguous empty array with shape `shape`.
'''
offsets = lambda s: [rand.randint(0, 3) for _ in s]
strides = lambda s: [rand.randint(1, 3) for _ in s]
parent_left_offsets = offsets(shape)
parent_right_offsets = offsets(shape)
parent_strides = strides(shape)
parent_shape = list()
child_slice = list()
for index, length in enumerate(shape):
left_offset = parent_left_offsets[index]
right_offset = parent_right_offsets[index]
stride = parent_strides[index]
parent_shape.append(left_offset + stride*length + right_offset)
if right_offset == 0:
child_slice.append(slice(left_offset, None, stride))
else:
child_slice.append(slice(left_offset, -1*right_offset, stride))
child = numpy.empty(parent_shape)[tuple(child_slice)]
if list(child.shape) != list(shape):
raise ValueError("The shape of the noncontiguous array is incorrect."
" This is a bug.")
return child
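# Illustrative sketch (added for documentation, not part of the test suite):
# empty_noncontiguous() returns a strided view into a larger parent buffer, so
# it has the requested shape but a non-None ``base``.
def _example_noncontiguous_view(shape=(4, 5)):
    '''Sketch only: the helper preserves the requested shape.'''
    child = empty_noncontiguous(shape)
    return child.shape == shape and child.base is not None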
def random_testcase():
ndims = rand.randint(1, 5)
axes = list()
directions = list()
dims = list()
for dim in range(ndims):
if ndims > 3:
dims.append(rand.randint(3, 10))
else:
dims.append(rand.randint(3, 100))
# throw out some dimensions randomly
if rand.choice([True, True, False]):
directions.append(rand.choice(real_transforms))
axes.append(dim)
if len(axes) == 0:
# reroll.
return random_testcase()
else:
return TestRealTransform(directions, dims, axes=axes)
@unittest.skipIf('64' not in _supported_types, 'double precision unavailable')
class RealToRealNormalisation(unittest.TestCase):
def test_normalisation(self):
for _ in range(50):
testcase = random_testcase()
self.assertTrue(testcase.test_normalisation())
@unittest.skipIf('64' not in _supported_types, 'double precision unavailable')
class RealToRealExactData(unittest.TestCase):
def test_exact_data(self):
for _ in range(50):
testcase = random_testcase()
self.assertTrue(testcase.test_against_exact_data())
@unittest.skipIf('64' not in _supported_types, 'double precision unavailable')
class RealToRealRandomData(unittest.TestCase):
def test_random_data(self):
for _ in range(50):
testcase = random_testcase()
self.assertTrue(testcase.test_against_random_data())
test_cases = (TestRealToRealLookups,
RealToRealNormalisation,
RealToRealExactData,
RealToRealRandomData,)
if __name__ == '__main__':
run_test_suites(test_cases)
|
|
"""
This script is the first thing that runs when a Kolibri container starts and
receives as args the Kolibri command CMD, e.g., ['kolibri', 'start', '--foreground']
The purpose of this script is to perform optional 'setup tasks' before starting Kolibri.
The following environment variables are used for setup steps:
- set KOLIBRI_PEX_URL to 'default' or something like http://host.org/nameof.pex
- set DOCKERMNT_PEX_PATH to something like ``/docker/mnt/nameof.pex`` to run a pex mounted at ``/docker/mnt/``
- KOLIBRI_PROVISIONDEVICE_FACILITY if set, provision facility with this name
- KOLIBRI_CHANNELS_TO_IMPORT if set, a comma-separated list of channel IDs to import
"""
import logging
import os
import re
import subprocess
import sys
# py2+py3 compatible imports via http://python-future.org/compatible_idioms.html
try:
from urllib.request import Request, build_opener, HTTPRedirectHandler
except ImportError:
from urllib2 import Request, HTTPRedirectHandler, build_opener
logging.basicConfig(level=logging.INFO)
# SETTINGS
################################################################################
DEFAULT_KOLIBRI_PEX_URL = "https://learningequality.org/r/kolibri-pex-latest"
# ENV VARIABLES
################################################################################
# - KOLIBRI_PEX_URL set to 'default' or something like http://host.org/nameof.pex
# - DOCKERMNT_PEX_PATH set to something like ``/docker/mnt/nameof.pex``
# - DEPLOY_TYPE in ['pex', 'source']. This will be determined automatically based
#   on the presence of the ENV vars KOLIBRI_PEX_URL and DOCKERMNT_PEX_PATH.
# - KOLIBRI_PROVISIONDEVICE_FACILITY if set, provision facility with this name
# - KOLIBRI_CHANNELS_TO_IMPORT if set, a comma-separated list of channel IDs to import
DEFAULT_ENVS = {
"WHICH_PYTHON": "python2", # or python3 if you prefer; Kolibri don't care
"KOLIBRI_HOME": "/kolibrihome",
"KOLIBRI_HTTP_PORT": "8080",
"KOLIBRI_LANG": "en",
"KOLIBRI_RUN_MODE": "demoserver",
"KOLIBRI_PROVISIONDEVICE_PRESET": "formal", # other options are 'nonformal', 'informal'
"KOLIBRI_PROVISIONDEVICE_SUPERUSERNAME": "devowner",
"KOLIBRI_PROVISIONDEVICE_SUPERUSERPASSWORD": "admin123",
}
def set_default_envs():
"""
Set default values for ENV variables and infer DEPLOY_TYPE.
"""
envs = os.environ
for key in DEFAULT_ENVS:
env = os.getenv(key, None)
if env is None:
envs[key] = DEFAULT_ENVS[key]
    # Logic to determine DEPLOY_TYPE and KOLIBRI_PEX_PATH when using a pex deploy
    ############################################################################
    # Check for the edge case where both the URL and the local pex path are given
if "KOLIBRI_PEX_URL" in envs and "DOCKERMNT_PEX_PATH" in envs:
logging.warning("Using DOCKERMNT_PEX_PATH and ignoring KOLIBRI_PEX_URL.")
del envs["KOLIBRI_PEX_URL"]
# CASE A: Running the pex at KOLIBRI_PEX_URL
if "KOLIBRI_PEX_URL" in envs and "DOCKERMNT_PEX_PATH" not in envs:
if envs["KOLIBRI_PEX_URL"] == "default":
envs["KOLIBRI_PEX_URL"] = DEFAULT_KOLIBRI_PEX_URL
pex_name = "kolibri-latest.pex"
else:
pex_name = os.path.basename(
envs["KOLIBRI_PEX_URL"].split("?")[0]
) # in case ?querystr...
envs["DEPLOY_TYPE"] = "pex"
envs["KOLIBRI_PEX_PATH"] = os.path.join(envs["KOLIBRI_HOME"], pex_name)
# CASE B: Running the pex from the /docker/mnt volume
elif "DOCKERMNT_PEX_PATH" in envs and "KOLIBRI_PEX_URL" not in envs:
pex_name = os.path.basename(envs["DOCKERMNT_PEX_PATH"])
envs["DEPLOY_TYPE"] = "pex"
envs["KOLIBRI_PEX_PATH"] = os.path.join(envs["KOLIBRI_HOME"], pex_name)
    # CASE C: If no pex URL or local path is specified, run Kolibri from source code
else:
envs["DEPLOY_TYPE"] = "source"
# FACILITY CREATION
################################################################################
def get_kolibri_version(kolibri_cmd):
"""
Calls `kolibri_cmd` (list) to extract version information.
The parameter `kolibri_cmd` can be either a kolibri command e.g. ['kolibri']
or a Kolibri pex invocation like ['python', 'some.pex'].
    Returns a tuple of ints (major, minor), or (None, None) if the version check fails.
"""
MAJOR_MINOR_PAT = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)(\.\d+)?.*")
cmd = kolibri_cmd[:] + ["--version"]
logging.info("Calling cmd {} to get the Kolibri version information.".format(cmd))
cmd_str = " ".join(cmd)
proc = subprocess.Popen(cmd_str, stdout=subprocess.PIPE, shell=True)
line = proc.stdout.readline().decode("utf-8")
m = MAJOR_MINOR_PAT.search(line)
if m:
major, minor = m.groupdict()["major"], m.groupdict()["minor"]
return int(major), int(minor)
return None, None
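# Illustrative behaviour of the version parsing above (assuming the first line
# of `--version` output starts with the version number):
#   "0.15.1"  -> (0, 15)
#   "0.9.0b3" -> (0, 9)
#   a line that does not start with MAJOR.MINOR -> (None, None)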
def create_facility(kolibri_cmd):
"""
Create the facility so users don't have to go through setup wizard.
    We must use a different approach depending on the Kolibri version:
    - Kolibri versions in the range [0, 0.9) --> skip (no provisiondevice command)
    - Kolibri versions 0.9 and above         --> run provisiondevice
"""
logging.info("Running create_facility")
major, minor = get_kolibri_version(kolibri_cmd)
if major is None or minor is None:
logging.warning("Failed to retrieve Kolibri version. Skipping.")
return
if major >= 1:
provisiondevice(kolibri_cmd)
if major == 0:
if minor >= 9:
provisiondevice(kolibri_cmd)
else:
logging.info("Skipping automated facility creation step.")
def provisiondevice(kolibri_cmd):
envs = os.environ
logging.info(
">" * 80
+ "\n"
+ "Provisioning device in facility {}".format(
envs["KOLIBRI_PROVISIONDEVICE_FACILITY"]
)
)
cmd = kolibri_cmd[:]
cmd += ["manage", "provisiondevice"]
cmd += ['--facility "{}"'.format(envs["KOLIBRI_PROVISIONDEVICE_FACILITY"])]
cmd += ["--preset {}".format(envs["KOLIBRI_PROVISIONDEVICE_PRESET"])]
cmd += ["--superusername {}".format(envs["KOLIBRI_PROVISIONDEVICE_SUPERUSERNAME"])]
cmd += [
"--superuserpassword {}".format(
envs["KOLIBRI_PROVISIONDEVICE_SUPERUSERPASSWORD"]
)
]
cmd += ["--language_id {}".format(envs["KOLIBRI_LANG"])]
cmd += ["--verbosity 0"]
cmd += ["--noinput"]
logging.debug("Provision command = {}".format(" ".join(cmd)))
cmd_str = " ".join(cmd)
subprocess.call(cmd_str, shell=True)
# OTHER SETUP TASKS
################################################################################
def import_channels(kolibri_cmd):
"""
    Import the channels listed in the comma-separated string `KOLIBRI_CHANNELS_TO_IMPORT`.
"""
logging.info(">" * 80 + "\n" + "Importing content channels...")
envs = os.environ
assert "KOLIBRI_CHANNELS_TO_IMPORT" in envs, "no KOLIBRI_CHANNELS_TO_IMPORT"
channels_list_str = envs["KOLIBRI_CHANNELS_TO_IMPORT"]
channel_ids_to_import = map(str.strip, channels_list_str.split(","))
importchannel_cmd = kolibri_cmd[:] + ["manage", "importchannel", "network"]
importcontent_cmd = kolibri_cmd[:] + ["manage", "importcontent", "network"]
for channel_id in channel_ids_to_import:
importchannel_cmd_str = " ".join(importchannel_cmd + [channel_id])
subprocess.call(importchannel_cmd_str, shell=True)
importcontent_cmd_str = " ".join(importcontent_cmd + [channel_id])
subprocess.call(importcontent_cmd_str, shell=True)
# PEX DEPLOY
################################################################################
class SmartRedirectHandler(HTTPRedirectHandler):
"""
    Helper to handle redirects (we don't want to depend on `requests`; rely only on the stdlib).
"""
def http_error_301(self, req, fp, code, msg, headers):
result = HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
result.status = code
return result
def copy_pex_file_to_kolibrihome():
"""
Handles both the case when we get pex from a URL and from a local path.
"""
envs = os.environ
pex_path = envs["KOLIBRI_PEX_PATH"]
if "KOLIBRI_PEX_URL" in envs and "DOCKERMNT_PEX_PATH" not in envs:
logging.info("Downloading pex from {}".format(envs["KOLIBRI_PEX_URL"]))
request = Request(
envs["KOLIBRI_PEX_URL"], headers={"User-Agent": "Mozilla/5.0"}
)
opener = build_opener(SmartRedirectHandler())
pex_response = opener.open(request)
with open(pex_path, "wb") as pex_file:
pex_file.write(pex_response.read())
elif "DOCKERMNT_PEX_PATH" in envs and "KOLIBRI_PEX_URL" not in envs:
logging.info("Copying pex from {}".format(envs["DOCKERMNT_PEX_PATH"]))
with open(envs["DOCKERMNT_PEX_PATH"], "rb") as dockermnt_pex:
with open(pex_path, "wb") as pex_file:
pex_file.write(dockermnt_pex.read())
logging.info("Pex file saved to {}".format(pex_path))
# MAIN LOGIC
################################################################################
def set_default_language(kolibri_cmd):
"""
Set the default language for this installation of Kolibri. Any running
instance of Kolibri needs to be restarted in order for this change to work.
"""
envs = os.environ
# Depends on vars: KOLIBRI_HOME and DJANGO_SETTINGS_MODULE
cmd = kolibri_cmd[:] + ["language", "setdefault", envs["KOLIBRI_LANG"]]
cmd_str = " ".join(cmd)
subprocess.call(cmd_str, shell=True)
def run_kolibri(cmd):
logging.info("Starting Kolibri using command {}".format(" ".join(cmd)))
os.chdir("/kolibri") # in case we're running from source and calling devserver
cmd_str = " ".join(cmd)
# Depends on vars: KOLIBRI_HOME, KOLIBRI_HTTP_PORT, and DJANGO_SETTINGS_MODULE
subprocess.call(cmd_str, shell=True)
# This results in pstree: init --> /docker/entrypoint.py --> sh --> kolibri
# the extra sh intermediary is there because yarn needs to read ENV variables
#
# The option of running kolibri as PID 1, i.e. process tree init --> kolibri
# does not work because kolibri (like all django servers) does not register
# an explicit handler for ^C so killing container is harder (needs kill -9)
def get_kolibri_cmd(CMD):
"""
Returns the appropriate Kolibri invocation for the current DEPLOY_TYPE.
"""
envs = os.environ
deploy_type = envs["DEPLOY_TYPE"]
if deploy_type == "pex":
python_cmd = envs["WHICH_PYTHON"]
pex_path = envs["KOLIBRI_PEX_PATH"]
kolibri_cmd = [python_cmd, pex_path]
elif deploy_type == "source":
kolibri_cmd = ["kolibri"]
return kolibri_cmd
if __name__ == "__main__":
set_default_envs()
envs = os.environ
if envs["DEPLOY_TYPE"] == "pex":
copy_pex_file_to_kolibrihome()
    CMD = sys.argv[1:]  # get the docker CMD passed in, stripping out the entrypoint
kolibri_cmd = get_kolibri_cmd(CMD)
#
# KOLIBRI SETUP AUTOMATION OPTIONAL TASKS ##################################
if "KOLIBRI_PROVISIONDEVICE_FACILITY" in envs:
create_facility(kolibri_cmd)
if "KOLIBRI_CHANNELS_TO_IMPORT" in envs:
import_channels(kolibri_cmd)
set_default_language(kolibri_cmd)
# TODO: generateuserdata?
# TODO: load entire /kolibrihome?
# TODO: load fixtures --- loaddata json and/or SQL?
#
# ASSUMPTION: first element of CMD is always specified as ['kolibri', ...]
# even when we want to run a pex file, so in that case we need to edit CMD:
if len(kolibri_cmd) > 1 and CMD[0] == "kolibri":
        # replace the leading 'kolibri' with ['python', '/path/to/some.pex'] if needed
run_cmd = kolibri_cmd + CMD[1:]
else:
        # otherwise send CMD straight through like a good docker entrypoint...
run_cmd = CMD[:]
# Do it!
run_kolibri(run_cmd)
|
|
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', '[email protected]'), ('Full Name', '[email protected]'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-au', gettext_noop('Australian English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-hans', gettext_noop('Simplified Chinese')),
('zh-hant', gettext_noop('Traditional Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
# Settings for language cookie
LANGUAGE_COOKIE_NAME = 'django_language'
LANGUAGE_COOKIE_AGE = None
LANGUAGE_COOKIE_DOMAIN = None
LANGUAGE_COOKIE_PATH = '/'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails. Deprecated, must be removed in 1.8.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
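# Example override for a project settings module (illustrative; not set here):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'mydatabase.sqlite3',
#     }
# }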
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
EMAIL_USE_SSL = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
# re.compile(r'^/favicon\.ico$'),
# re.compile(r'^/robots\.txt$'),
# re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod;
# see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_DIRECTORY_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
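# Example (illustrative; only when such a trusted proxy header is in place):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')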
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_CACHE_ALIAS = 'default' # Cache to store session data if using the cache session backend.
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # class to serialize session data
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'logging.config.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
#################
# SYSTEM CHECKS #
#################
# List of all issues generated by system checks that should be silenced. Light
# issues like warnings, infos or debugs will not generate a message. Silencing
# serious issues like errors and criticals does not result in hiding the
# message, but Django will not stop you from e.g. running the server.
SILENCED_SYSTEM_CHECKS = []
|
|
"""
Base classes and constants for the querying subsystem.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Dec 2, 2011.
"""
from everest.entities.utils import identifier_from_slug
from everest.exceptions import MultipleResultsException
from everest.exceptions import NoResultsException
from everest.querying.interfaces import IQuery
from everest.querying.interfaces import ISpecificationVisitor
from everest.querying.specifications import eq
from everest.querying.specifications import order
from everest.utils import generative
from everest.utils import get_filter_specification_visitor
from everest.utils import get_order_specification_visitor
from zope.interface import implementer # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['CqlExpression',
'CqlExpressionList',
'EXPRESSION_KINDS',
'SpecificationVisitor',
'SpecificationExpressionHolder',
]
class EXPRESSION_KINDS(object):
"""
Supported expression kinds.
"""
CQL = 'CQL'
SQL = 'SQL'
EVAL = 'EVAL'
NOSQL = 'NOSQL'
class CqlExpression(object):
"""
Single CQL expression.
CQL expressions can be converted to a string and support the conjunction
(AND) operation.
"""
def __str__(self):
return self._as_string()
def __and__(self, other):
if isinstance(other, CqlExpression):
res = CqlExpressionList([self, other])
elif isinstance(other, CqlExpressionList):
res = CqlExpressionList([self] + other.expressions)
else:
raise TypeError("unsupported operand type(s) for &: "
"'CqlExpression' and '%s'" % type(other))
return res
def _as_string(self):
raise NotImplementedError('Abstract method.')
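# Illustrative sketch (hypothetical subclass, added for documentation; not part
# of everest): a concrete expression only needs to implement _as_string, and
# the '&' operator defined above then combines expressions into a
# CqlExpressionList, which renders with the '~' conjunction separator:
#
#     class LiteralCqlExpression(CqlExpression):
#         def __init__(self, text):
#             self._text = text
#         def _as_string(self):
#             return self._text
#
#     expr = LiteralCqlExpression('age:greater-than:30') \
#            & LiteralCqlExpression('name:starts-with:"J"')
#     str(expr)  # -> 'age:greater-than:30~name:starts-with:"J"'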
class CqlExpressionList(object):
"""
List of CQL expressions.
Like a single CQL expression, CQL expression lists can be converted to a
string and joined with the conjunction (AND) operation.
"""
__cql_and = '~'
def __init__(self, expressions):
self.expressions = expressions
def __and__(self, other):
if isinstance(other, CqlExpression):
res = CqlExpressionList(self.expressions + [other])
elif isinstance(other, CqlExpressionList):
res = CqlExpressionList(self.expressions + other.expressions)
else:
raise TypeError("unsupported operand type(s) for &: "
"'CqlExpression' and '%s'" % type(other))
return res
def __str__(self):
return self.__cql_and.join([str(expr) for expr in self.expressions])
class SpecificationExpressionHolder(object):
"""
Base class for specification visitors.
"""
def __init__(self):
self.__expression_stack = []
def _push(self, expr):
self.__expression_stack.append(expr)
def _pop(self):
return self.__expression_stack.pop()
@property
def expression(self):
# If we have more than one expression on the stack, traversal of the
# input specification tree has not finished yet.
assert len(self.__expression_stack) == 1
return self.__expression_stack[0]
@implementer(ISpecificationVisitor)
class SpecificationVisitor(SpecificationExpressionHolder):
"""
Base class for all specification visitors.
"""
def visit_nullary(self, spec):
op = self.__get_op_func(spec.operator.name)
self._push(op(spec))
def visit_unary(self, spec):
op = self.__get_op_func(spec.operator.name)
expr = self._pop()
self._push(op(spec, expr))
def visit_binary(self, spec):
op = self.__get_op_func(spec.operator.name)
right_expr = self._pop()
left_expr = self._pop()
self._push(op(spec, left_expr, right_expr))
def _conjunction_op(self, spec, *expressions):
raise NotImplementedError('Abstract method.')
def __get_op_func(self, op_name):
# Visitor function dispatch using the operator name.
return getattr(self, '_%s_op' % identifier_from_slug(op_name))
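# Note (added for documentation, hedged): concrete visitors are expected to
# provide one `_<operator_name>_op` method per specification operator. The
# dispatch above presumably maps an operator name such as 'equal-to' (via
# identifier_from_slug) to a method `_equal_to_op(self, spec)`, while
# conjunctions are routed to `_conjunction_op(self, spec, *expressions)`.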
@implementer(IQuery)
class Query(object):
"""
Base class for everest queries.
"""
def __init__(self, entity_class):
self._entity_class = entity_class
self._filter_expr = None
self._order_expr = None
self._slice_key = None
def __iter__(self):
raise NotImplementedError('Abstract method.')
def count(self):
raise NotImplementedError('Abstract method.')
def all(self):
return list(iter(self))
def one(self):
ents = self.all()
if len(ents) == 0:
raise NoResultsException('No results found when exactly one '
'was expected.')
elif len(ents) > 1:
raise MultipleResultsException('More than one result found '
'where exactly one was expected.')
return ents[0]
@generative
def filter(self, filter_expression):
        if filter_expression is not None and self._filter_expr is not None:
filter_expression = self._filter_expr & filter_expression
self._filter_expr = filter_expression
return self
def filter_by(self, **kw):
raise NotImplementedError('Abstract method.')
@generative
def order(self, order_expression):
        if order_expression is not None and self._order_expr is not None:
order_expression = self._order_expr & order_expression
self._order_expr = order_expression
return self
def order_by(self, *args):
raise NotImplementedError('Abstract method.')
@generative
def slice(self, start, stop):
self._slice_key = slice(start, stop)
return self
class ExpressionBuilderMixin(object):
"""
Mixin for query classes using eval filter and order expressions.
"""
expression_kind = None
def filter_by(self, **kw):
spec = eq(**kw)
visitor_cls = get_filter_specification_visitor(self.expression_kind)
vst = visitor_cls(self._entity_class)
spec.accept(vst)
return vst.filter_query(self)
def order_by(self, *args):
spec = order(*args)
visitor_cls = get_order_specification_visitor(self.expression_kind)
vst = visitor_cls(self._entity_class)
spec.accept(vst)
return vst.order_query(self)
class RepositoryQuery(Query): # still abstract pylint:disable=W0223
"""
Query operating on objects stored in a repository and loaded through a
session.
"""
def __init__(self, entity_class, session, repository):
Query.__init__(self, entity_class)
self._session = session
self._repository = repository
def __iter__(self):
repo_ents = self._repository.retrieve(
self._entity_class,
filter_expression=self._filter_expr,
order_expression=self._order_expr,
slice_key=self._slice_key
)
for repo_ent in repo_ents:
yield self._session.load(self._entity_class, repo_ent)
def count(self):
return sum(1 for _ in self._repository.retrieve(
self._entity_class,
filter_expression=self._filter_expr))
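# Illustrative usage sketch (hypothetical entity class and expressions, added
# for documentation; not part of everest). Query-building methods are wrapped
# in @generative, so chained calls can be used to compose a query:
#
#     query = RepositoryQuery(MyEntity, session, repository)
#     page = query.filter(my_filter_expression) \
#                 .order(my_order_expression) \
#                 .slice(0, 10).all()
#     single = query.filter(id_equals_one_expression).one()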
|
|
# These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
import os
import sys
import doctest
import inspect
import numpy
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
from .nosetester import get_package_name
from .utils import KnownFailureException, KnownFailureTest
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, https://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
return module.__name__ == object.__module__
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True  # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import (
isroutine, isclass, ismodule, isfunction, ismethod
)
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val)):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want = want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'" % sz, "int")
want = want.replace("'<i%d'" % sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
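# Illustrative doctest fragment (sketch, added for documentation): putting
# "#random" anywhere in the *expected* output makes check_output above accept
# whatever was actually printed, which is how nondeterministic examples are
# written:
#
#     >>> np.random.rand(2)
#     array([ 0.59557796,  0.92661863])  #random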
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin above would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger(object):
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailurePlugin(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailure is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureException,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailure '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
KnownFailure = KnownFailurePlugin # backwards compat
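# Illustrative usage sketch (added for documentation): a test can mark itself
# as a known failure by raising the exception this plugin handles; nose then
# records it under the KNOWNFAIL label instead of counting it as an error:
#
#     def test_not_fixed_upstream_yet():
#         raise KnownFailureException("known failure, tracked upstream")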
class FPUModeCheckPlugin(Plugin):
"""
Plugin that checks the FPU mode before and after each test,
raising failures if the test changed the mode.
"""
def prepareTestCase(self, test):
from numpy.core._multiarray_tests import get_fpu_mode
def run(result):
old_mode = get_fpu_mode()
test.test(result)
new_mode = get_fpu_mode()
if old_mode != new_mode:
try:
raise AssertionError(
"FPU mode changed from {0:#x} to {1:#x} during the "
"test".format(old_mode, new_mode))
except AssertionError:
result.addFailure(test, sys.exc_info())
return run
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
|
|
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
# ============= local library imports ==========================
from __future__ import absolute_import
from __future__ import print_function
import os
from lxml import etree
from uncertainties import nominal_value, std_dev
from pychron.geochron.definitions import (
NSMAP,
EXPERIMENT_ATTRS,
PRODUCTION_ATTRS,
MEASUREMENT_ATTRS,
SAMPLE_ATTRS,
)
from pychron.loggable import Loggable
from pychron.paths import paths
class GeochronService(Loggable):
def load_schema(self):
p = os.path.join(paths.data_dir, "geochron_arar_schema.xsd")
self._schema = etree.XMLSchema(file=p)
print(self._schema)
def assemble_xml(self, analysis_group):
root = etree.Element("ArArModel", nsmap=NSMAP)
doc = etree.ElementTree(root)
self._add_sample(root, analysis_group)
return etree.tostring(doc, pretty_print=True)
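    # Usage sketch (analysis_group is assumed to look like the stub classes in
    # the __main__ block at the bottom of this module):
    #   service = GeochronService()
    #   xml_bytes = service.assemble_xml(analysis_group)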
def _add_sample(self, root, analysis_group):
sample_elem = etree.SubElement(root, "Sample")
analysis = analysis_group.analyses[0]
for attr, iattr, required in SAMPLE_ATTRS:
val = self._get_value(analysis, attr, iattr, required)
if val is not None:
sample_elem.attrib[attr] = val
self._add_preferred_age(sample_elem, analysis_group)
self._add_interpreted_ages(sample_elem, analysis_group)
self._add_parameters(sample_elem, analysis_group)
def _add_preferred_age(self, sample_elem, analysis_group):
# preferred_age = analysis_group.preferred_age
preferred_age_elem = etree.SubElement(sample_elem, "PreferredAge")
# for attr, iattr, required in PREFERRED_AGE_ATTRS:
# val = self._get_value(preferred_age, attr, iattr, required)
# if val is not None:
# preferred_age_elem.attrib[attr] = val
experiments_included_elem = etree.SubElement(
preferred_age_elem, "ExperimentsIncluded"
)
for analysis in analysis_group.analyses:
experiments_elem = etree.SubElement(experiments_included_elem, "Experiment")
experiments_elem.attrib["experimentIdentifier"] = analysis.record_id
def _add_interpreted_ages(self, sample_elem, analysis_group):
interpreted_ages_elem = etree.SubElement(sample_elem, "InterpretedAges")
interpreted_age_elem = etree.SubElement(interpreted_ages_elem, "InterpretedAge")
        # NOTE: the original code left all of these tag names as empty
        # strings, which makes etree.SubElement raise a ValueError. The
        # names below are assumed placeholders until the real schema
        # element names are filled in.
        age = "Age"
        age_error = "AgeError"
        interpreted_age_error_internal = "InterpretedAgeErrorInternal"
        interpreted_age_error_external = "InterpretedAgeErrorExternal"
        age_type = "AgeType"
        age_classification = "AgeClassification"
        age_reference = "AgeReference"
        description = "Description"
        etree.SubElement(interpreted_age_elem, age)
        etree.SubElement(interpreted_age_elem, age_error)
        etree.SubElement(interpreted_age_elem, interpreted_age_error_internal)
        etree.SubElement(interpreted_age_elem, interpreted_age_error_external)
        etree.SubElement(interpreted_age_elem, age_type)
        etree.SubElement(interpreted_age_elem, age_classification)
        etree.SubElement(interpreted_age_elem, age_reference)
        etree.SubElement(interpreted_age_elem, description)
def _add_parameters(self, sample_elem, analysis_group):
param_elem = etree.SubElement(sample_elem, "Parameters")
ref = analysis_group.analyses[0]
param_elem.attrib["standardName"] = ref.standard_name
param_elem.attrib["standardAge"] = str(nominal_value(ref.standard_age))
param_elem.attrib["standardAgeSigma"] = str(std_dev(ref.standard_age))
param_elem.attrib["standardMaterial"] = ref.standard_material
param_elem.attrib["decayConstant40ArTotal"] = str(
nominal_value(ref.arar_constants.lambda_k)
)
param_elem.attrib["decayConstant40ArTotalSigma"] = str(
std_dev(ref.arar_constants.lambda_k)
)
param_elem.attrib["jValue"] = str(nominal_value(ref.j))
param_elem.attrib["jValueSigma"] = str(std_dev(ref.j))
# for attr, iattr, required in PARAMETER_ATTRS:
# if
#
#
# val = self._get_value(analysis_group, attr, iattr, required)
# if val is not None:
# param_elem.attrib[attr] = val
self._add_experiment(param_elem, analysis_group)
def _add_experiment(self, param_elem, analysis_group):
experiment_elem = etree.SubElement(param_elem, "Experiment")
ref = analysis_group.analyses[0]
for attr, iattr, required in EXPERIMENT_ATTRS:
val = self._get_value(ref, attr, iattr, required)
if val is not None:
experiment_elem.attrib[attr] = val
for ai in analysis_group.analyses:
self._add_irradiation(experiment_elem, ai)
self._add_measurement(experiment_elem, ai)
def _add_measurement(self, experiment_elem, analysis):
measurement_elem = etree.SubElement(experiment_elem, "Measurement")
for attr, iattr, required in MEASUREMENT_ATTRS:
val = self._get_value(analysis, attr, iattr, required)
if val is not None:
measurement_elem.attrib[attr] = val
measurement_elem.attrib["interceptUnit"] = "fA"
measurement_elem.attrib["blankUnit"] = "fA"
for k in ("36", "37", "38", "39", "40"):
iso = analysis.get_isotope("Ar{}".format(k))
v = iso.get_baseline_corrected_value()
tag = "intercept{}Ar".format(k)
measurement_elem.attrib[tag] = str(nominal_value(v))
measurement_elem.attrib["{}Sigma".format(tag)] = str(std_dev(v))
measurement_elem.attrib["{}RegressionType".format(tag)] = iso.fit
v = iso.blank.uvalue
tag = "blank{}Ar".format(k)
measurement_elem.attrib[tag] = str(nominal_value(v))
measurement_elem.attrib["{}Sigma".format(tag)] = str(std_dev(v))
def _add_irradiation(self, experiment_elem, analysis):
irradiation_elem = etree.SubElement(experiment_elem, "Irradiation")
irradiation_elem.attrib["irradiationName"] = analysis.irradiation
irradiation_elem.attrib[
"irradiationReactorName"
] = analysis.production_obj.reactor
production = analysis.production_obj
constants = analysis.arar_constants
for attr, iattr, required in PRODUCTION_ATTRS:
if attr in (
"correction40Ar36ArAtmospheric",
"correction40Ar36ArAtmosphericSigma",
"correction38Ar36ArAtmospheric",
"correction38Ar36ArAtmosphericSigma",
):
val = self._get_value(constants, attr, iattr, required)
else:
val = self._get_value(production, attr, iattr, required)
if val is not None:
irradiation_elem.attrib[attr] = val
segments = analysis.chron_segments
for i, (pwr, dur, dt, start, end) in enumerate(segments):
seg_elem = etree.SubElement(irradiation_elem, "Segment")
seg_elem.attrib["segmentNumber"] = str(i)
seg_elem.attrib["segmentDuration"] = str(
(end - start).total_seconds() / 3600.0
)
seg_elem.attrib["segmentDate"] = start.strftime("%Y:%m:%d")
seg_elem.attrib["segmentEndTime"] = end.strftime("%H:%M")
seg_elem.attrib["segmentPowerSetting"] = str(pwr)
def _get_value(self, obj, attr, iattr, required):
if not iattr:
iattr = attr
val = None
try:
val = str(getattr(obj, iattr))
except AttributeError as e:
if required:
self.warning_dialog(
'Required attribute "{}" not supplied. Contact developer'.format(
attr
)
)
self.debug("get value {:<38s} {:<38s}={}".format(attr, iattr, val))
return val
if __name__ == "__main__":
class PreferredAgeSpec:
preferredAge = "64.05"
preferredAgeSigma = "0.116406054716396"
preferredAgeSigmaInternal = "0.23"
preferredAgeSigmaExternal = "23.23"
preferredAgeType = "Age Plateau"
preferredAgeClassification = "Eruption Age"
preferredAgeReference = "Koppers, A.A.P., Yamazaki, T., Geldmacher, J., Gee, J.S., Pressling, N., Hoshi, H. and the IODP Expedition 330 Science Party (2012). Limited latitudinal mantle plume motion for the Louisville hotspot. Nature Geoscience: doi: 10.1038/NGEO1638."
preferredAgeDescription = "Classical groundmass age spectrum with high apparent ages for the low temperature steps due to remaining alteration in this submarine basaltic material and 39Ar recoil effects. The middle section reflects the primary argon being released from the groundmass giving the eruption age of the sample, mostly free of alteration and recoil effects. The high temparature steps give lower apparent ages due to recoil effect on 37Ar when outgassing the Calcium-rich clinopyroxene and plagioclase groundmass phases. The fact that the plateau age is concordant with both isochron ages and the total fusion age provides confidence in this interpretation."
class SampleSpec:
sampleID = "330-U1376A-23R-3 33-37 cm"
igsn = "MY_IGSN_HERE"
longitude = "-171.8806667"
latitude = "-32.2165"
analystName = "Anthony Koppers"
class AnalysisSpec:
runid = "12313-01A"
class AnalysisGroup:
sample = SampleSpec()
analyses = [AnalysisSpec()]
preferred_age = PreferredAgeSpec()
paths.build("_dev")
ag = AnalysisGroup()
g = GeochronService()
# g.load_schema()
g.assemble_xml(ag)
# ============= EOF =============================================
|
|
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- refactor into class (without globals)
- update demo notebook with new options
"""
import numpy as np
import os
import sys
import gflags
import pandas as pd
import time
import skimage.io
import skimage.transform
import selective_search_ijcv_with_python as selective_search
import caffe
NET = None
IMAGE_DIM = None
CROPPED_DIM = None
IMAGE_CENTER = None
IMAGE_MEAN = None
CROPPED_IMAGE_MEAN = None
BATCH_SIZE = None
NUM_OUTPUT = None
CROP_MODES = ['list', 'center_only', 'corners', 'selective_search']
def load_image(filename):
"""
Input:
filename: string
Output:
image: an image of size (H x W x 3) of type uint8.
"""
img = skimage.io.imread(filename)
if img.ndim == 2:
img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def format_image(image, window=None, cropped_size=False):
"""
Input:
image: (H x W x 3) ndarray
window: (4) ndarray
(ymin, xmin, ymax, xmax) coordinates, 0-indexed
cropped_size: bool
Whether to output cropped size image or full size image.
Output:
image: (3 x H x W) ndarray
Resized to either IMAGE_DIM or CROPPED_DIM.
dims: (H, W) of the original image
"""
dims = image.shape[:2]
# Crop a subimage if window is provided.
if window is not None:
image = image[window[0]:window[2], window[1]:window[3]]
# Resize to input size, subtract mean, convert to BGR
image = image[:, :, ::-1]
if cropped_size:
image = skimage.transform.resize(image, (CROPPED_DIM, CROPPED_DIM)) * 255
image -= CROPPED_IMAGE_MEAN
else:
image = skimage.transform.resize(image, (IMAGE_DIM, IMAGE_DIM)) * 255
image -= IMAGE_MEAN
image = image.swapaxes(1, 2).swapaxes(0, 1)
return image, dims
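# Shape sketch (assumes config() below has populated IMAGE_DIM, IMAGE_MEAN and
# friends; the filename is a placeholder):
#   img = load_image('example.jpg')        # (H x W x 3) uint8
#   net_input, dims = format_image(img)    # (3 x IMAGE_DIM x IMAGE_DIM), (H, W)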
def _image_coordinates(dims, window):
"""
Calculate the original image coordinates of a
window in the canonical (IMAGE_DIM x IMAGE_DIM) coordinates
Input:
dims: (H, W) of the original image
window: (ymin, xmin, ymax, xmax) in the (IMAGE_DIM x IMAGE_DIM) frame
Output:
image_window: (ymin, xmin, ymax, xmax) in the original image frame
"""
h, w = dims
h_scale, w_scale = h / IMAGE_DIM, w / IMAGE_DIM
image_window = window * np.array((1. / h_scale, 1. / w_scale,
h_scale, w_scale))
return image_window.round().astype(int)
def _assemble_images_list(input_df):
"""
For each image, collect the crops for the given windows.
Input:
input_df: pandas.DataFrame
with 'filename', 'ymin', 'xmin', 'ymax', 'xmax' columns
Output:
images_df: pandas.DataFrame
with 'image', 'window', 'filename' columns
"""
# unpack sequence of (image filename, windows)
windows = input_df[['ymin', 'xmin', 'ymax', 'xmax']].values
image_windows = (
(ix, windows[input_df.index.get_loc(ix)]) for ix in input_df.index.unique()
)
# extract windows
data = []
for image_fname, windows in image_windows:
image = load_image(image_fname)
for window in windows:
window_image, _ = format_image(image, window, cropped_size=True)
data.append({
'image': window_image[np.newaxis, :],
'window': window,
'filename': image_fname
})
images_df = pd.DataFrame(data)
return images_df
def _assemble_images_center_only(image_fnames):
"""
For each image, square the image and crop its center.
Input:
image_fnames: list
Output:
images_df: pandas.DataFrame
With 'image', 'window', 'filename' columns.
"""
crop_start, crop_end = IMAGE_CENTER, IMAGE_CENTER + CROPPED_DIM
crop_window = np.array((crop_start, crop_start, crop_end, crop_end))
data = []
for image_fname in image_fnames:
image, dims = format_image(load_image(image_fname))
data.append({
'image': image[np.newaxis, :,
crop_start:crop_end,
crop_start:crop_end],
'window': _image_coordinates(dims, crop_window),
'filename': image_fname
})
images_df = pd.DataFrame(data)
return images_df
def _assemble_images_corners(image_fnames):
"""
For each image, square the image and crop its center, four corners,
and mirrored version of the above.
Input:
image_fnames: list
Output:
images_df: pandas.DataFrame
With 'image', 'window', 'filename' columns.
"""
# make crops
indices = [0, IMAGE_DIM - CROPPED_DIM]
crops = np.empty((5, 4), dtype=int)
curr = 0
for i in indices:
for j in indices:
crops[curr] = (i, j, i + CROPPED_DIM, j + CROPPED_DIM)
curr += 1
crops[4] = (IMAGE_CENTER, IMAGE_CENTER,
IMAGE_CENTER + CROPPED_DIM, IMAGE_CENTER + CROPPED_DIM)
all_crops = np.tile(crops, (2, 1))
data = []
for image_fname in image_fnames:
image, dims = format_image(load_image(image_fname))
image_crops = np.empty((10, 3, CROPPED_DIM, CROPPED_DIM), dtype=np.float32)
curr = 0
for crop in crops:
image_crops[curr] = image[:, crop[0]:crop[2], crop[1]:crop[3]]
curr += 1
image_crops[5:] = image_crops[:5, :, :, ::-1] # flip for mirrors
for i in range(len(all_crops)):
data.append({
'image': image_crops[i][np.newaxis, :],
'window': _image_coordinates(dims, all_crops[i]),
'filename': image_fname
})
images_df = pd.DataFrame(data)
return images_df
def _assemble_images_selective_search(image_fnames):
"""
Run Selective Search window proposals on all images, then for each
image-window pair, extract a square crop.
Input:
image_fnames: list
Output:
images_df: pandas.DataFrame
With 'image', 'window', 'filename' columns.
"""
windows_list = selective_search.get_windows(image_fnames)
data = []
for image_fname, windows in zip(image_fnames, windows_list):
image = load_image(image_fname)
for window in windows:
window_image, _ = format_image(image, window, cropped_size=True)
data.append({
'image': window_image[np.newaxis, :],
'window': window,
'filename': image_fname
})
images_df = pd.DataFrame(data)
return images_df
def assemble_batches(inputs, crop_mode='center_only'):
"""
Assemble DataFrame of image crops for feature computation.
Input:
inputs: list of filenames (center_only, corners, and selective_search mode)
OR input DataFrame (list mode)
mode: string
'list': take the image windows from the input as-is
'center_only': take the CROPPED_DIM middle of the image windows
'corners': take CROPPED_DIM-sized boxes at 4 corners and center of
the image windows, as well as their flipped versions: a total of 10.
'selective_search': run Selective Search region proposal on the
image windows, and take each enclosing subwindow.
Output:
df_batches: list of DataFrames, each one of BATCH_SIZE rows.
Each row has 'image', 'filename', and 'window' info.
            Column 'image' contains (X x 3 x CROPPED_DIM x CROPPED_DIM) ndarrays.
Column 'filename' contains source filenames.
Column 'window' contains [ymin, xmin, ymax, xmax] ndarrays.
If 'filename' is None, then the row is just for padding.
Note: for increased efficiency, increase the batch size (to the limit of gpu
memory) to avoid the communication cost
"""
if crop_mode == 'list':
images_df = _assemble_images_list(inputs)
elif crop_mode == 'center_only':
images_df = _assemble_images_center_only(inputs)
elif crop_mode == 'corners':
images_df = _assemble_images_corners(inputs)
elif crop_mode == 'selective_search':
images_df = _assemble_images_selective_search(inputs)
else:
raise Exception("Unknown mode: not in {}".format(CROP_MODES))
# Make sure the DataFrame has a multiple of BATCH_SIZE rows:
# just fill the extra rows with NaN filenames and all-zero images.
N = images_df.shape[0]
remainder = N % BATCH_SIZE
if remainder > 0:
zero_image = np.zeros_like(images_df['image'].iloc[0])
zero_window = np.zeros((1, 4), dtype=int)
remainder_df = pd.DataFrame([{
'filename': None,
'image': zero_image,
'window': zero_window
}] * (BATCH_SIZE - remainder))
images_df = images_df.append(remainder_df)
N = images_df.shape[0]
# Split into batches of BATCH_SIZE.
    ind = np.arange(N) // BATCH_SIZE
    df_batches = [images_df[ind == i] for i in range(N // BATCH_SIZE)]
return df_batches
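# Usage sketch (placeholder filenames; config() must have been called first so
# that BATCH_SIZE and the image dim/mean globals are set):
#   batches = assemble_batches(['img1.jpg', 'img2.jpg'], crop_mode='corners')
#   feat_dfs = [compute_feats(batch) for batch in batches]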
def compute_feats(images_df):
input_blobs = [np.ascontiguousarray(
np.concatenate(images_df['image'].values), dtype='float32')]
output_blobs = [np.empty((BATCH_SIZE, NUM_OUTPUT, 1, 1), dtype=np.float32)]
NET.Forward(input_blobs, output_blobs)
feats = [output_blobs[0][i].flatten() for i in range(len(output_blobs[0]))]
# Add the features and delete the images.
del images_df['image']
images_df['feat'] = feats
return images_df
def config(model_def, pretrained_model, gpu, image_dim, image_mean_file):
global IMAGE_DIM, CROPPED_DIM, IMAGE_CENTER, IMAGE_MEAN, CROPPED_IMAGE_MEAN
global NET, BATCH_SIZE, NUM_OUTPUT
# Initialize network by loading model definition and weights.
t = time.time()
print("Loading Caffe model.")
NET = caffe.CaffeNet(model_def, pretrained_model)
NET.set_phase_test()
if gpu:
NET.set_mode_gpu()
print("Caffe model loaded in {:.3f} s".format(time.time() - t))
# Configure for input/output data
IMAGE_DIM = image_dim
CROPPED_DIM = NET.blobs()[0].width
IMAGE_CENTER = int((IMAGE_DIM - CROPPED_DIM) / 2)
# Load the data set mean file
IMAGE_MEAN = np.load(image_mean_file)
CROPPED_IMAGE_MEAN = IMAGE_MEAN[IMAGE_CENTER:IMAGE_CENTER + CROPPED_DIM,
IMAGE_CENTER:IMAGE_CENTER + CROPPED_DIM,
:]
BATCH_SIZE = NET.blobs()[0].num # network batch size
NUM_OUTPUT = NET.blobs()[-1].channels # number of output classes
if __name__ == "__main__":
# Parse cmdline options
gflags.DEFINE_string(
"model_def", "", "Model definition file.")
gflags.DEFINE_string(
"pretrained_model", "", "Pretrained model weights file.")
gflags.DEFINE_boolean(
"gpu", False, "Switch for gpu computation.")
gflags.DEFINE_string(
"crop_mode", "center_only", "Crop mode, from {}".format(CROP_MODES))
gflags.DEFINE_string(
"input_file", "", "Input txt/csv filename.")
gflags.DEFINE_string(
"output_file", "", "Output h5/csv filename.")
    gflags.DEFINE_integer(
        "images_dim", 256, "Canonical dimension of (square) images.")
gflags.DEFINE_string(
"images_mean_file",
os.path.join(os.path.dirname(__file__), '../imagenet/ilsvrc_2012_mean.npy'),
"Data set image mean (numpy array).")
FLAGS = gflags.FLAGS
FLAGS(sys.argv)
# Configure network, input, output
config(FLAGS.model_def, FLAGS.pretrained_model, FLAGS.gpu, FLAGS.images_dim,
FLAGS.images_mean_file)
# Load input
# .txt = list of filenames
# .csv = dataframe that must include a header
# with column names filename, ymin, xmin, ymax, xmax
t = time.time()
print('Loading input and assembling batches...')
if FLAGS.input_file.lower().endswith('txt'):
with open(FLAGS.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif FLAGS.input_file.lower().endswith('csv'):
inputs = pd.read_csv(FLAGS.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
        raise Exception("Unknown input file type: not in txt or csv")
# Assemble into batches
image_batches = assemble_batches(inputs, FLAGS.crop_mode)
print('{} batches assembled in {:.3f} s'.format(len(image_batches),
time.time() - t))
# Process the batches.
t = time.time()
    print('Processing {} files in {} batches'.format(len(inputs),
                                                      len(image_batches)))
dfs_with_feats = []
for i in range(len(image_batches)):
if i % 10 == 0:
print('Batch {}/{}, elapsed time: {:.3f} s'.format(i,
len(image_batches),
time.time() - t))
dfs_with_feats.append(compute_feats(image_batches[i]))
    # Concatenate, dropping the padding rows.
df = pd.concat(dfs_with_feats).dropna(subset=['filename'])
df.set_index('filename', inplace=True)
print("Processing complete after {:.3f} s.".format(time.time() - t))
# Label coordinates
coord_cols = ['ymin', 'xmin', 'ymax', 'xmax']
df[coord_cols] = pd.DataFrame(data=np.vstack(df['window']),
index=df.index,
columns=coord_cols)
del(df['window'])
# Write out the results.
t = time.time()
if FLAGS.output_file.lower().endswith('csv'):
# enumerate the class probabilities
class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
df[class_cols] = pd.DataFrame(data=np.vstack(df['feat']),
index=df.index,
columns=class_cols)
df.to_csv(FLAGS.output_file, sep=',',
cols=coord_cols + class_cols,
header=True)
else:
df.to_hdf(FLAGS.output_file, 'df', mode='w')
print("Done. Saving to {} took {:.3f} s.".format(
FLAGS.output_file, time.time() - t))
sys.exit()
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Defines classes for all the resources a microvm could need attaching."""
import urllib
import re
from framework.utils import compare_versions, is_io_uring_supported, run_cmd
from framework.defs import API_USOCKET_URL_PREFIX
class Actions():
"""Facility for sending operations instructions on the microvm."""
ACTIONS_CFG_RESOURCE = 'actions'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._actions_cfg_url = api_url + self.ACTIONS_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Send an instruction to the microvm."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._actions_cfg_url),
json=datax
)
@staticmethod
def create_json(action_type=None, payload=None):
"""Compose the json associated to this type of API request."""
datax = {}
if action_type is not None:
datax['action_type'] = action_type
if payload is not None:
datax['payload'] = payload
return datax
class Balloon():
"""Facility for specifying balloon device configurations."""
BALLOON_CFG_RESOURCE = 'balloon'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._balloon_cfg_url = api_url + self.BALLOON_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Specify the balloon device configuration."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._balloon_cfg_url),
json=datax
)
def patch(self, **args):
"""Update a previously attached balloon device."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}".format(self._balloon_cfg_url),
json=datax
)
def patch_stats(self, **args):
"""Update the balloon statistics interval."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}".format(self._balloon_cfg_url + "/statistics"),
json=datax
)
def get(self):
"""Get the response of specifying the balloon configuration."""
return self._api_session.get(
self._balloon_cfg_url
)
def get_stats(self):
"""Get the response of specifying the balloon statistics."""
return self._api_session.get(
"{}".format(self._balloon_cfg_url + "/statistics")
)
@staticmethod
def create_json(
amount_mib=None,
deflate_on_oom=None,
stats_polling_interval_s=None
):
"""Compose the json associated to this type of API request."""
datax = {}
if amount_mib is not None:
datax['amount_mib'] = amount_mib
if deflate_on_oom is not None:
datax['deflate_on_oom'] = deflate_on_oom
if stats_polling_interval_s is not None:
datax['stats_polling_interval_s'] = stats_polling_interval_s
return datax
class BootSource():
"""Facility for specifying the source of the boot process."""
BOOT_CFG_RESOURCE = 'boot-source'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._boot_cfg_url = api_url + self.BOOT_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Specify the boot information."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._boot_cfg_url),
json=datax
)
def patch(self, **args):
"""Update a previously attached boot source."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}".format(self._boot_cfg_url),
json=datax
)
def get(self):
"""Get the response of specifying a boot source."""
return self._api_session.get(
self._boot_cfg_url
)
@staticmethod
def create_json(
boot_args=None,
kernel_image_path=None,
initrd_path=None):
"""Compose the json associated to this type of API request."""
datax = {}
if kernel_image_path is not None:
datax['kernel_image_path'] = kernel_image_path
if initrd_path is not None:
datax['initrd_path'] = initrd_path
if boot_args is not None:
datax['boot_args'] = boot_args
return datax
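    # Usage sketch (socket path, session object and kernel path are placeholders):
    #   boot = BootSource('/srv/fc.sock', api_session)
    #   boot.put(kernel_image_path='vmlinux.bin',
    #            boot_args='console=ttyS0 reboot=k panic=1')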
# Too few public methods (1/2) (too-few-public-methods)
# pylint: disable=R0903
class DescribeInstance():
"""Facility for getting the microVM state."""
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
self._descinst_cfg_url = \
API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._api_session = api_session
def get(self):
"""Get the status of configuring the current microvm."""
return self._api_session.get(
self._descinst_cfg_url
)
class Drive():
"""Facility for attaching a block device."""
DRIVE_CFG_RESOURCE = 'drives'
def __init__(
self,
api_usocket_full_name,
api_session,
firecracker_version
):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._drive_cfg_url = api_url + self.DRIVE_CFG_RESOURCE
self._api_session = api_session
self._firecracker_version = firecracker_version
def put(self, **args):
"""Attach a block device or update the details of a previous one."""
# Default the io engine to Async on kernels > 5.10 so that we
# make sure to exercise both Sync and Async behaviour in the CI.
# Also check the FC version to make sure that it has support for
# configurable io_engine.
if is_io_uring_supported() and \
compare_versions(self._firecracker_version, "0.25.0") > 0 \
and \
('io_engine' not in args or args['io_engine'] is None):
args['io_engine'] = 'Async'
datax = self.create_json(**args)
return self._api_session.put(
"{}/{}".format(self._drive_cfg_url, args['drive_id']),
json=datax
)
def put_with_default_io_engine(self, **args):
"""
        Attach a block device or update the details of a previous one,
        using the Firecracker default for the io_engine.
"""
datax = self.create_json(**args)
return self._api_session.put(
"{}/{}".format(self._drive_cfg_url, args['drive_id']),
json=datax
)
def patch(self, **args):
"""Attach a block device or update the details of a previous one."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}/{}".format(self._drive_cfg_url, args['drive_id']),
json=datax
)
def get(self, drive_id):
"""Get the status of attaching some block device."""
return self._api_session.get(
"{}/{}".format(self._drive_cfg_url, drive_id)
)
@staticmethod
def create_json(
drive_id=None,
path_on_host=None,
is_root_device=None,
partuuid=None,
is_read_only=None,
rate_limiter=None,
cache_type=None,
io_engine=None):
"""Compose the json associated to this type of API request."""
datax = {}
if drive_id is not None:
datax['drive_id'] = drive_id
if path_on_host is not None:
datax['path_on_host'] = path_on_host
if is_root_device is not None:
datax['is_root_device'] = is_root_device
if partuuid is not None:
datax['partuuid'] = partuuid
if is_read_only is not None:
datax['is_read_only'] = is_read_only
if cache_type is not None:
datax['cache_type'] = cache_type
if rate_limiter is not None:
datax['rate_limiter'] = rate_limiter
if io_engine is not None:
datax['io_engine'] = io_engine
return datax
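    # Behaviour sketch: on hosts with io_uring support and Firecracker newer
    # than 0.25.0, put() fills in io_engine='Async' unless the caller supplies
    # one. Example call (all values are placeholders):
    #   drive = Drive('/srv/fc.sock', api_session, firecracker_version='1.0.0')
    #   drive.put(drive_id='rootfs', path_on_host='rootfs.ext4',
    #             is_root_device=True, is_read_only=False)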
# Too few public methods (1/2) (too-few-public-methods)
# pylint: disable=R0903
class FullConfig():
"""Facility for getting the full microVM configuration."""
EXPORT_CFG_RESOURCE = 'vm/config'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._export_cfg_url = api_url + self.EXPORT_CFG_RESOURCE
self._api_session = api_session
def get(self):
"""Get full configuration of the current microvm."""
return self._api_session.get(
self._export_cfg_url
)
# Too few public methods (1/2) (too-few-public-methods)
# pylint: disable=R0903
class InstanceVersion():
"""Facility for getting the microVM version."""
VERSION_CFG_RESOURCE = 'version'
def __init__(self, api_usocket_full_name, fc_binary_path, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._version_cfg_url = api_url + self.VERSION_CFG_RESOURCE
self._fc_binary_path = fc_binary_path
self._api_session = api_session
def get(self):
"""Get the version of the current microvm, from the cmdline."""
_, stdout, _ = run_cmd(
"{} --version".format(self._fc_binary_path))
return re.match(
r"^Firecracker v([0-9]+\.[0-9]+(\.[0-9]+)?)",
stdout.partition('\n')[0]).group(1)
def get_from_api(self):
"""Get the version of the current microvm, from the API."""
return self._api_session.get(
self._version_cfg_url
)
class Logger():
"""Facility for setting up the logging system and sending API requests."""
LOGGER_CFG_RESOURCE = 'logger'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._logger_cfg_url = api_url + self.LOGGER_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Configure or update the settings of the logging system."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._logger_cfg_url),
json=datax
)
def patch(self, **args):
"""Configure or update the settings of the logging system."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}".format(self._logger_cfg_url),
json=datax
)
@staticmethod
def create_json(
log_path=None,
level=None,
show_level=None,
show_log_origin=None):
"""Compose the json associated to this type of API request."""
datax = {}
if log_path is not None:
datax['log_path'] = log_path
if level is not None:
datax['level'] = level
if show_level is not None:
datax['show_level'] = show_level
if show_log_origin is not None:
datax['show_log_origin'] = show_log_origin
return datax
class SnapshotCreate():
"""Facility for sending create snapshot commands on the microvm."""
SNAPSHOT_CREATE_URL = 'snapshot/create'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._snapshot_cfg_url = api_url + self.SNAPSHOT_CREATE_URL
self._api_session = api_session
def put(self, **args):
"""Create a snapshot of the microvm."""
self._api_session.untime()
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._snapshot_cfg_url),
json=datax
)
@staticmethod
def create_json(mem_file_path, snapshot_path, diff=False, version=None):
"""Compose the json associated to this type of API request."""
if diff:
snapshot_type = 'Diff'
else:
snapshot_type = 'Full'
datax = {
'mem_file_path': mem_file_path,
'snapshot_path': snapshot_path,
'snapshot_type': snapshot_type,
}
if version is not None:
datax['version'] = version
return datax
class SnapshotLoad():
"""Facility for sending load snapshot commands on the microvm."""
SNAPSHOT_LOAD_URL = 'snapshot/load'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._snapshot_cfg_url = api_url + self.SNAPSHOT_LOAD_URL
self._api_session = api_session
def put(self, **args):
"""Load a snapshot of the microvm."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._snapshot_cfg_url),
json=datax
)
@staticmethod
def create_json(mem_file_path, snapshot_path, diff=False, resume=False):
"""Compose the json associated to this type of API request."""
datax = {
'mem_file_path': mem_file_path,
'snapshot_path': snapshot_path,
}
if diff:
datax['enable_diff_snapshots'] = True
if resume:
datax['resume_vm'] = True
return datax
class SnapshotHelper():
"""Facility for creation and loading of microvm snapshots."""
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
self._create = SnapshotCreate(api_usocket_full_name, api_session)
self._load = SnapshotLoad(api_usocket_full_name, api_session)
self._vm_state = Vm(api_usocket_full_name, api_session)
def create(self, mem_file_path, snapshot_path, diff=False, version=None):
"""Create a snapshot of the microvm."""
return self._create.put(
mem_file_path=mem_file_path,
snapshot_path=snapshot_path,
diff=diff,
version=version
)
def load(self, mem_file_path, snapshot_path, diff=False, resume=False):
"""Load a snapshot of the microvm."""
response = self._load.put(
mem_file_path=mem_file_path,
snapshot_path=snapshot_path,
diff=diff,
resume=resume
)
if resume and "unknown field `resume_vm`" in response.text:
# Retry using old API - separate resume command.
response = self._load.put(
mem_file_path=mem_file_path,
snapshot_path=snapshot_path,
diff=diff,
resume=False
)
if response.status_code != 204:
return response
response = self._vm_state.patch(state='Resumed')
return response
class Metrics:
"""Facility for setting up the metrics system and sending API requests."""
METRICS_CFG_RESOURCE = 'metrics'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._metrics_cfg_url = api_url + self.METRICS_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Configure or update the settings of the metrics system."""
datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._metrics_cfg_url),
json=datax
)
def patch(self, **args):
"""Configure or update the settings of the metrics system."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}".format(self._metrics_cfg_url),
json=datax
)
@staticmethod
def create_json(
metrics_path=None,
):
"""Compose the json associated to this type of API request."""
datax = {}
if metrics_path is not None:
datax['metrics_path'] = metrics_path
return datax
class MachineConfigure():
"""Facility for configuring the machine capabilities."""
MACHINE_CFG_RESOURCE = 'machine-config'
def __init__(
self,
api_usocket_full_name,
api_session,
firecracker_version):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._machine_cfg_url = api_url + self.MACHINE_CFG_RESOURCE
self._api_session = api_session
self._firecracker_version = firecracker_version
self._datax = {}
@property
def configuration(self):
"""Return machine config dictionary."""
return self._datax
def put(self, **args):
"""Specify the details of the machine configuration."""
self._datax = self.create_json(**args)
return self._api_session.put(
"{}".format(self._machine_cfg_url),
json=self._datax
)
def patch(self, **args):
"""Update the details of the machine configuration."""
datax = self.create_json(**args)
self._datax.update(datax)
return self._api_session.patch(
"{}".format(self._machine_cfg_url),
json=datax
)
def get(self):
"""Get the status of configuring the current microvm."""
return self._api_session.get(
self._machine_cfg_url
)
def create_json(
self,
vcpu_count=None,
mem_size_mib=None,
smt=None,
cpu_template=None,
track_dirty_pages=None):
"""Compose the json associated to this type of API request."""
datax = {}
if vcpu_count is not None:
datax['vcpu_count'] = vcpu_count
if mem_size_mib is not None:
datax['mem_size_mib'] = mem_size_mib
if compare_versions(self._firecracker_version, "0.25.0") <= 0:
datax['ht_enabled'] = False if smt is None else smt
elif smt is not None:
datax['smt'] = smt
if cpu_template is not None:
datax['cpu_template'] = cpu_template
if track_dirty_pages is not None:
datax['track_dirty_pages'] = track_dirty_pages
return datax
class MMDS():
"""Facility for sending microvm metadata services related API calls."""
MMDS_CFG_RESOURCE = 'mmds'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending MMDS API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._mmds_cfg_url = api_url + self.MMDS_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Send a new MMDS request."""
return self._api_session.put(
"{}".format(self._mmds_cfg_url),
json=args['json']
)
def put_config(self, **args):
"""Send a new MMDS config request."""
return self._api_session.put(
"{}".format(self._mmds_cfg_url + "/config"),
json=args['json']
)
def patch(self, **args):
"""Update the details of some MMDS request."""
return self._api_session.patch(
"{}".format(self._mmds_cfg_url),
json=args['json']
)
def get(self):
"""Get the status of the MMDS request."""
return self._api_session.get(
self._mmds_cfg_url
)
class Network():
"""Facility for handling network configuration for a microvm."""
NET_CFG_RESOURCE = 'network-interfaces'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._net_cfg_url = api_url + self.NET_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Attach a new tap interface."""
datax = self.create_json(**args)
return self._api_session.put(
"{}/{}".format(self._net_cfg_url, args['iface_id']),
json=datax
)
def patch(self, **args):
"""Apply an update to some tap interface."""
datax = self.create_json(**args)
return self._api_session.patch(
"{}/{}".format(self._net_cfg_url, args['iface_id']),
json=datax
)
@staticmethod
def create_json(
iface_id=None,
host_dev_name=None,
guest_mac=None,
rx_rate_limiter=None,
tx_rate_limiter=None):
"""Create the json for the net specific API request."""
datax = {
'iface_id': iface_id
}
if host_dev_name is not None:
datax['host_dev_name'] = host_dev_name
if guest_mac is not None:
datax['guest_mac'] = guest_mac
if tx_rate_limiter is not None:
datax['tx_rate_limiter'] = tx_rate_limiter
if rx_rate_limiter is not None:
datax['rx_rate_limiter'] = rx_rate_limiter
return datax
class Vm():
"""Facility for handling the state for a microvm."""
VM_CFG_RESOURCE = 'vm'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._vm_cfg_url = api_url + self.VM_CFG_RESOURCE
self._api_session = api_session
def patch(self, **args):
"""Apply an update to the microvm state."""
datax = self.create_json(**args)
return self._api_session.patch(
self._vm_cfg_url,
json=datax
)
@staticmethod
def create_json(state):
"""Create the json for the vm specific API request."""
datax = {
'state': state
}
return datax
class Vsock():
"""Facility for handling vsock configuration for a microvm."""
VSOCK_CFG_RESOURCE = 'vsock'
def __init__(self, api_usocket_full_name, api_session):
"""Specify the information needed for sending API requests."""
url_encoded_path = urllib.parse.quote_plus(api_usocket_full_name)
api_url = API_USOCKET_URL_PREFIX + url_encoded_path + '/'
self._vsock_cfg_url = api_url + self.VSOCK_CFG_RESOURCE
self._api_session = api_session
def put(self, **args):
"""Attach a new vsock device."""
datax = self.create_json(**args)
return self._api_session.put(
self._vsock_cfg_url,
json=datax
)
def patch(self, **args):
"""Apply an update to some vsock device."""
datax = self.create_json(**args)
return self._api_session.patch(
self._vsock_cfg_url,
json=datax
)
@staticmethod
def create_json(
guest_cid,
uds_path,
vsock_id=None):
"""Create the json for the vsock specific API request."""
datax = {
'guest_cid': guest_cid,
'uds_path': uds_path
}
if vsock_id:
datax['vsock_id'] = vsock_id
return datax
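# End-to-end usage sketch (socket path, session object and version string are
# placeholders; the order mirrors a typical microvm bring-up):
#   boot = BootSource(socket_path, api_session)
#   drive = Drive(socket_path, api_session, firecracker_version)
#   machine = MachineConfigure(socket_path, api_session, firecracker_version)
#   actions = Actions(socket_path, api_session)
#   ... issue put() calls for each, then actions.put(action_type='InstanceStart')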
|
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Node Composer: Isolate Distributor based on ``ortools``
Clusters the components of a composition into groups according to several
criteria.
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 1.0.1
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, \
Instantiate
# OR-Tools Linear solver
from ortools.linear_solver import pywraplp as ortools
# Composer
from cohorte.composer.node.beans import EligibleIsolate
import cohorte.composer
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_DISTRIBUTOR_ISOLATE)
@Instantiate('cohorte-composer-node-distributor')
class IsolateDistributor(object):
"""
Clusters components into groups. Each group corresponds to an isolate.
"""
def __init__(self):
"""
Sets up members
"""
# Number of calls to this distributor
self._nb_distribution = 0
# Names of components considered unstable
self.__unstable = set()
def distribute(self, components, existing_isolates):
"""
Computes the distribution of the given components
:param components: A list of RawComponent beans
:param existing_isolates: A set of pre-existing eligible isolates
:return: A tuple of tuples: updated and new EligibleIsolate beans
"""
# Prepare the lists of updated and new isolates
updated_isolates = set()
new_isolates = set()
# Create a map name -> isolate bean
map_isolates = {isolate.name: isolate for isolate in existing_isolates}
# 1. Predefined host isolates
reserved_isolates = set()
remaining = set()
for component in components:
if component.isolate:
isolate_name = component.isolate
reserved_isolates.add(isolate_name)
try:
# Use existing bean
isolate = map_isolates[isolate_name]
isolate.add_component(component)
updated_isolates.add(isolate)
except KeyError:
# Create a new bean
isolate = EligibleIsolate(component.isolate,
component.language,
[component])
map_isolates[isolate_name] = isolate
new_isolates.add(isolate)
else:
# Component must be treated afterwards
remaining.add(component)
# Hide reserved isolates
for isolate_name in reserved_isolates:
map_isolates.pop(isolate_name)
# 2. Unstable components must be isolated
# ... group remaining components by language
remaining_stable = {}
for component in remaining:
if component.name in self.__unstable:
# Component is known as unstable: isolate it
isolate = EligibleIsolate(None, component.language,
[component])
new_isolates.add(isolate)
else:
# Store stable component, grouped by language
remaining_stable.setdefault(component.language, set()) \
.add(component)
for language, components in remaining_stable.items():
# Gather components according to their compatibility
updated, added = self.__csp_dist(map_isolates, components,
language)
updated_isolates.update(updated)
new_isolates.update(added)
# Return tuples of updated and new isolates beans
return tuple(updated_isolates), tuple(new_isolates)
def __csp_dist(self, map_isolates, components, language):
"""
Gather components using OR-Tools
:param map_isolates: A Name -> EligibleIsolate bean map
:param components: Set of components to gather
:param language: Implementation language of components
:return: A tuple: (updated isolates, new isolates)
"""
# Normalize entries (components and isolates)
components_names = sorted(component.name for component in components)
nb_components = len(components_names)
isolates_names = sorted(map_isolates.keys())
# Compute boundaries
max_isolates = max(len(components_names), len(isolates_names)) + 1
# Prepare the incompatibility matrix
incompat_matrix = self.__make_incompatibility_matrix(components_names)
# Prepare the problem solver
solver = ortools.Solver("Components distribution",
ortools.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# Declare variables
# ... component on isolate (Iso_i <=> Iso_i_j = 1)
iso = {}
for i, name in enumerate(components_names):
for j in range(max_isolates):
iso[i, j] = solver.IntVar(0, 1, "{0} on {1}".format(name, j))
# ... assigned isolates (for the objective)
assigned_isolates = [solver.IntVar(0, 1, "Isolate {0}".format(i))
for i in range(max_isolates)]
# ... number of isolates for a component (must be 1)
nb_component_isolate = [solver.Sum(iso[i, j]
for j in range(max_isolates))
for i in range(nb_components)]
# ... number of components for an isolate
nb_isolate_components = [solver.Sum(iso[i, j]
for i in range(nb_components))
for j in range(max_isolates)]
# Constraints:
# ... 1 isolate per component
for i in range(nb_components):
solver.Add(nb_component_isolate[i] == 1)
# ... assigned isolates values must be updated
for j in range(max_isolates):
solver.Add(assigned_isolates[j] >=
nb_isolate_components[j] / nb_components)
# ... Avoid incompatible components on the same isolate
for i in range(len(incompat_matrix)):
for j in range(max_isolates):
# Pair on same isolate: sum = 2
solver.Add(iso[incompat_matrix[i][0], j] +
iso[incompat_matrix[i][1], j] <=
assigned_isolates[j])
# Define the objective: minimize the number of isolates
nb_assigned_isolates = solver.Sum(assigned_isolates)
solver.Minimize(nb_assigned_isolates)
# Solve the problem
solver.Solve()
# Print results
_logger.info("Number of isolates.: %s",
int(solver.Objective().Value()))
_logger.info("Isolates used......: %s",
[int(assigned_isolates[i].SolutionValue())
for i in range(max_isolates)])
for i in range(nb_components):
for j in range(max_isolates):
if int(iso[i, j].SolutionValue()) == 1:
break
else:
# No isolate associated ?
j = None
_logger.info("Component %s: Isolate %s", components_names[i], j)
_logger.info("WallTime...: %s", solver.WallTime())
_logger.info("Iterations.: %s", solver.Iterations())
# TODO: Prepare result isolates
updated_isolates = set()
added_isolates = [EligibleIsolate(None, language, components)]
return updated_isolates, added_isolates
@staticmethod
def __make_incompatibility_matrix(components_names):
"""
Prepares the incompatibility matrix
:param components_names: List of components names.
:return: A sorted incompatibility matrix
"""
# The incompatibility dictionary: component -> incompatible
incompat = {'Component-A': ['Nemesis-A'],
'Component-B': ['Nemesis-B']}
# Prepare the matrix (set of pairs)
incompat_matrix = set()
for name, incompat_names in incompat.items():
idx_name = components_names.index(name)
for incompat_name in incompat_names:
try:
idx_incompat = components_names.index(incompat_name)
# Store a sorted tuple (hashable)
incompat_matrix.add(tuple(
sorted((idx_name, idx_incompat))))
except ValueError:
# An incompatible component is not in the composition
pass
        # Return a sorted tuple of sorted tuples
return tuple(sorted(incompat_matrix))
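    # Example (sketch): with components_names == ['Component-A', 'Nemesis-A'],
    # the hard-coded incompat dictionary above yields ((0, 1),), i.e. the only
    # index pair that must not share an isolate in __csp_dist().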
@staticmethod
def handle_event(event):
"""
Handles a component/composition event
:param event: The event to handle
"""
# TODO: notify the crash and incompatibility stores
pass
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import unittest
from typing import NamedTuple
from unittest.mock import PropertyMock
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
import apache_beam as beam
from apache_beam import coders
from apache_beam.dataframe.convert import to_dataframe
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import utils
from apache_beam.runners.interactive.testing.mock_ipython import mock_get_ipython
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
class Record(NamedTuple):
order_id: int
product_id: int
quantity: int
def windowed_value(e):
from apache_beam.transforms.window import GlobalWindow
return WindowedValue(e, 1, [GlobalWindow()])
class ParseToDataframeTest(unittest.TestCase):
  def test_parse_windowedvalue(self):
    """Tests that WindowedValues are accepted but their window info is not
    included in the resulting DataFrame.
"""
els = [windowed_value(('a', 2)), windowed_value(('b', 3))]
actual_df = utils.elements_to_df(els, include_window_info=False)
expected_df = pd.DataFrame([['a', 2], ['b', 3]], columns=[0, 1])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_windowedvalue_with_window_info(self):
"""Tests that WindowedValues are supported and have their own columns.
"""
els = [windowed_value(('a', 2)), windowed_value(('b', 3))]
actual_df = utils.elements_to_df(els, include_window_info=True)
expected_df = pd.DataFrame(
[['a', 2, int(1e6), els[0].windows, els[0].pane_info],
['b', 3, int(1e6), els[1].windows, els[1].pane_info]],
columns=[0, 1, 'event_time', 'windows', 'pane_info'])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_windowedvalue_with_dicts(self):
"""Tests that dicts play well with WindowedValues.
"""
els = [
windowed_value({
'b': 2, 'd': 4
}),
windowed_value({
'a': 1, 'b': 2, 'c': 3
})
]
actual_df = utils.elements_to_df(els, include_window_info=True)
expected_df = pd.DataFrame(
[[np.nan, 2, np.nan, 4, int(1e6), els[0].windows, els[0].pane_info],
[1, 2, 3, np.nan, int(1e6), els[1].windows, els[1].pane_info]],
columns=['a', 'b', 'c', 'd', 'event_time', 'windows', 'pane_info'])
# check_like so that ordering of indices doesn't matter.
pd.testing.assert_frame_equal(actual_df, expected_df, check_like=True)
def test_parse_dataframes(self):
"""Tests that it correctly parses a DataFrame.
"""
deferred = to_dataframe(beam.Pipeline() | beam.Create([Record(0, 0, 0)]))
els = [windowed_value(pd.DataFrame(Record(n, 0, 0))) for n in range(10)]
actual_df = utils.elements_to_df(
els, element_type=deferred._expr.proxy()).reset_index(drop=True)
expected_df = pd.concat([e.value for e in els], ignore_index=True)
pd.testing.assert_frame_equal(actual_df, expected_df)
def test_parse_series(self):
"""Tests that it correctly parses a Pandas Series.
"""
deferred = to_dataframe(beam.Pipeline()
| beam.Create([Record(0, 0, 0)]))['order_id']
els = [windowed_value(pd.Series([n])) for n in range(10)]
actual_df = utils.elements_to_df(
els, element_type=deferred._expr.proxy()).reset_index(drop=True)
expected_df = pd.concat([e.value for e in els], ignore_index=True)
pd.testing.assert_series_equal(actual_df, expected_df)
class ToElementListTest(unittest.TestCase):
def test_test_stream_payload_events(self):
"""Tests that the to_element_list can limit the count in a single bundle."""
coder = coders.FastPrimitivesCoder()
def reader():
element_payload = [
TestStreamPayload.TimestampedElement(
encoded_element=coder.encode(
WindowedValueHolder(WindowedValue(e, 0, []))),
timestamp=Timestamp.of(0).micros) for e in range(10)
]
event = TestStreamPayload.Event(
element_event=TestStreamPayload.Event.AddElements(
elements=element_payload))
yield event
# The reader creates 10 elements in a single TestStreamPayload but we limit
# the number of elements read to 5 here. This tests that the to_element_list
# can limit the number of elements in a single bundle.
elements = utils.to_element_list(
reader(), coder, include_window_info=False, n=5)
self.assertSequenceEqual(list(elements), list(range(5)))
def test_element_limit_count(self):
"""Tests that the to_element_list can limit the count."""
elements = utils.to_element_list(
iter(range(10)), None, include_window_info=False, n=5)
self.assertSequenceEqual(list(elements), list(range(5)))
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
class IPythonLogHandlerTest(unittest.TestCase):
def setUp(self):
utils.register_ipython_log_handler()
self._interactive_root_logger = logging.getLogger(
'apache_beam.runners.interactive')
def test_ipython_log_handler_not_double_registered(self):
utils.register_ipython_log_handler()
ipython_log_handlers = list(
filter(
lambda x: isinstance(x, utils.IPythonLogHandler),
[handler for handler in self._interactive_root_logger.handlers]))
self.assertEqual(1, len(ipython_log_handlers))
@patch('apache_beam.runners.interactive.utils.IPythonLogHandler.emit')
def test_default_logging_level_is_info(self, mock_emit):
    # By default, the logging level of loggers and log handlers is NOTSET and
    # propagation defaults to True for all loggers. In this scenario, all log
    # records from child loggers are propagated to the interactive "root"
    # logger, which is set to INFO level and handled by the sole log handler,
    # IPythonLogHandler (itself at NOTSET). The effect is that everything at or
    # above INFO level is logged through IPython.core.display to all frontends
    # connected to the current kernel.
dummy_logger = logging.getLogger('apache_beam.runners.interactive.dummy1')
dummy_logger.info('info')
mock_emit.assert_called_once()
dummy_logger.debug('debug')
# Emit is not called, so it's still called once.
mock_emit.assert_called_once()
@patch('apache_beam.runners.interactive.utils.IPythonLogHandler.emit')
def test_child_module_logger_can_override_logging_level(self, mock_emit):
# When a child logger's logging level is configured to something that is not
# NOTSET, it takes back the logging control from the interactive "root"
# logger by not propagating anything.
dummy_logger = logging.getLogger('apache_beam.runners.interactive.dummy2')
dummy_logger.setLevel(logging.DEBUG)
mock_emit.assert_not_called()
dummy_logger.debug('debug')
    # Because the dummy child logger is configured to log at DEBUG level, it
    # now propagates DEBUG records to the interactive "root" logger.
mock_emit.assert_called_once()
    # When the dummy child logger is configured to log at CRITICAL level, it
    # will only propagate CRITICAL records to the interactive "root" logger.
    dummy_logger.setLevel(logging.CRITICAL)
    # ERROR records will not be handled now.
dummy_logger.error('error')
# Emit is not called, so it's still called once.
mock_emit.assert_called_once()
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
@pytest.mark.skipif(
not ie.current_env().is_interactive_ready,
reason='[interactive] dependency is not installed.')
class ProgressIndicatorTest(unittest.TestCase):
def setUp(self):
ie.new_env()
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_progress_in_plain_text_when_not_in_notebook(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = False
with patch('IPython.core.display.display') as mocked_display:
@utils.progress_indicated
def progress_indicated_dummy():
mocked_display.assert_any_call('Processing...')
progress_indicated_dummy()
mocked_display.assert_any_call('Done.')
@patch('IPython.get_ipython', new_callable=mock_get_ipython)
@patch(
'apache_beam.runners.interactive.interactive_environment'
'.InteractiveEnvironment.is_in_notebook',
new_callable=PropertyMock)
def test_progress_in_HTML_JS_when_in_notebook(
self, mocked_is_in_notebook, unused):
mocked_is_in_notebook.return_value = True
with patch('IPython.core.display.HTML') as mocked_html,\
patch('IPython.core.display.Javascript') as mocked_js:
with utils.ProgressIndicator('enter', 'exit'):
mocked_html.assert_called()
mocked_js.assert_called()
@unittest.skipIf(
not ie.current_env().is_interactive_ready,
'[interactive] dependency is not installed.')
class MessagingUtilTest(unittest.TestCase):
SAMPLE_DATA = {'a': [1, 2, 3], 'b': 4, 'c': '5', 'd': {'e': 'f'}}
def setUp(self):
ie.new_env()
def test_as_json_decorator(self):
@utils.as_json
def dummy():
return MessagingUtilTest.SAMPLE_DATA
# As of Python 3.6, for the CPython implementation of Python,
# dictionaries remember the order of items inserted.
self.assertEqual(json.loads(dummy()), MessagingUtilTest.SAMPLE_DATA)
if __name__ == '__main__':
unittest.main()
|
|
"""
Utilities for RHL's workshop at the DSFP, Chicago, July 2016
"""
import math, os, sys
import matplotlib.pyplot as pyplot
import numpy as np
import imageProc
try:
_mpFigures
except NameError:
_mpFigures = {0 : None} # matplotlib (actually pyplot) figures
eventHandlers = {} # event handlers for matplotlib figures
def getMpFigure(fig=None, clear=True):
"""Return a pyplot figure(); if fig is supplied save it and make it the default
fig may also be a bool (make a new figure) or an int (return or make a figure (1-indexed;
python-list style -n supported)
"""
if not pyplot:
raise RuntimeError("I am unable to plot as I failed to import matplotlib")
if isinstance(fig, bool): # we want a new one
fig = len(_mpFigures) + 1 # matplotlib is 1-indexed
if isinstance(fig, int):
i = fig
if i == 0:
raise RuntimeError("I'm sorry, but matplotlib uses 1-indexed figures")
if i < 0:
try:
i = sorted(_mpFigures.keys())[i] # simulate list's [-n] syntax
except IndexError:
if _mpFigures:
print >> sys.stderr, "Illegal index: %d" % i
i = 1
def lift(fig):
fig.canvas._tkcanvas._root().lift() # == Tk's raise, but raise is a python reserved word
if _mpFigures.has_key(i):
try:
lift(_mpFigures[i])
except Exception, e:
del _mpFigures[i]
if not _mpFigures.has_key(i):
for j in range(1, i):
getMpFigure(j, clear=False)
_mpFigures[i] = pyplot.figure()
#
# Modify pyplot.figure().show() to make it raise the plot too
#
def show(self, _show=_mpFigures[i].show):
_show(self)
try:
lift(self)
except Exception, e:
pass
# create a bound method
import types
_mpFigures[i].show = types.MethodType(show, _mpFigures[i], _mpFigures[i].__class__)
fig = _mpFigures[i]
if not fig:
i = sorted(_mpFigures.keys())[-1]  # fall back to the most recent figure
if i > 0:
fig = _mpFigures[i]
else:
fig = getMpFigure(1)
if clear:
fig.clf()
pyplot.figure(fig.number) # make it active
return fig
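# Hedged usage sketch for getMpFigure (figure numbers are just examples):
#
#   fig = getMpFigure(True)            # always make a brand-new figure
#   fig = getMpFigure(2)               # reuse (or create) figure 2, cleared
#   fig = getMpFigure(-1)              # most recently created figure
#   fig = getMpFigure(2, clear=False)  # keep whatever is already plotted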
class Data(object):
def __init__(self, image=None, mask=None, variance=None, shape=None):
if image is not None:
if shape:
assert image.shape == shape
else:
shape = image.shape
if mask is not None:
assert mask.shape == shape
if variance is not None:
assert variance.shape == shape
if shape:
self.image = image if image is not None else np.zeros(shape)
self.mask = mask if mask is not None else np.zeros(shape, dtype="uint16")
self.variance = variance if variance is not None else np.zeros_like(self.image)
else:
self.image, self.mask, self.variance = None, None, None
self.truth = None
def read(self, dirName="./Data", readImage=True, readEimage=True, readRaw=False):
self.image, self.mask, self.truth = readData(dirName, readImage, readEimage, readRaw)
q25, q75 = np.percentile(self.image.flat, [25, 75])
self.variance = self.image + (0.741*(q75 - q25))**2
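# The 0.741*(q75 - q25) factor converts the interquartile range into a robust
# estimate of the Gaussian sigma (IQR ~= 1.349*sigma for a normal
# distribution), so the variance model is roughly "counts plus a constant
# noise floor estimated from the image itself".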
def copy(self, image=None, mask=None):
cp = Data(None)
cp.image = image if image is not None else None if self.image is None else self.image.copy()
cp.mask = mask if mask is not None else None if self.mask is None else self.mask.copy()
cp.variance = None if self.variance is None else self.variance.copy()
cp.truth = None if self.truth is None else self.truth.copy()
if cp.mask is None:
cp.mask = np.zeros_like(cp.image, dtype="uint16")
return cp
def clearMaskPlane(self, bitName=None):
"""Clear the bitName (e.g. INTRP) bit in the mask"""
if bitName:
self.mask &= ~imageProc.maskPlanes[bitName]
else:
self.mask = 0x0
def setMaskPlane(self, threshold, bitName, clear=False):
"""Set the bitName (e.g. INTRP) bit in the mask when the image is above threshold;
if clear is True, unset the bitplane first (see also clearMaskPlane)"""
if clear:
self.clearMaskPlane(bitName)
self.mask[self.image > threshold] |= imageProc.maskPlanes[bitName]
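# Hedged usage sketch for the Data container (directory and mask-plane names
# follow the defaults used elsewhere in this module):
#
#   d = Data()
#   d.read("./Data")                  # fills image, mask, truth and variance
#   d.setMaskPlane(1000, "DETECTED")  # flag pixels brighter than the threshold
#   d2 = d.copy()                     # independent copy of all planes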
def readData(dirName="./Data", image=True, eimage=True, readRaw=False):
ims = []
if image:
for f in ("raw" if readRaw else "image", "mask",):
ims.append(np.load(os.path.join(dirName, "%s.npy" % f)))
else:
ims += [None, None]
if eimage:
ims.append(np.load(os.path.join(dirName, "eimage.npy")))
else:
ims.append(None)
return ims
def mtv(im, I0=0, b=1, mask=None, isMask=False, alpha=None, clear=True, fig=None, evData=None):
"""Display an image, using an asinh stretch (softened by b)"""
fig = pyplot.figure(fig)
try:
mtv(im.image, I0=I0, b=b, fig=fig.number, evData=im)
mtv(im.mask, isMask=True, alpha=alpha, fig=fig.number, clear=False)
return
except AttributeError:
if not isMask:
evData = im
if isMask:
if alpha is None:
alpha = 0.7
maskPlanes = imageProc.maskPlanes
r = (im & (maskPlanes["BAD"] | maskPlanes["CR"])) != 0
g = (im & (maskPlanes["INTRP"] | maskPlanes["SATUR"] | maskPlanes["EDGE"])) != 0
b = (im & (maskPlanes["DETECTED"] | maskPlanes["EDGE"])) != 0
alpha = alpha*np.ones_like(im)
alpha[im == 0] = 0
lim4 = np.dstack([r, g, b, alpha]).reshape([im.shape[0], im.shape[1], 4])
pyplot.imshow(lim4, origin="lower", interpolation="nearest")
else:
if b == 0:
b = 1e-10
ax = pyplot.imshow(np.arcsinh((im - I0)/b), origin='lower', interpolation='nearest',
cmap=pyplot.cm.gray)
if mask is not None:
mtv(mask, isMask=True, alpha=alpha, fig=fig.number)
if evData is not None:
axes = pyplot.axes()
myText = axes.text(0.05, 1.05, 'Press "return" to show intensity here',
transform=axes.transAxes, va='top')
global eventHandlers
eventHandlers[fig] = EventHandler((evData, myText), fig)
class EventHandler(object):
"""A class to handle key strokes with matplotlib displays"""
def __init__(self, data, fig):
self.fig = fig
im, text = data
try:
self.image = im.image
self.mask = im.mask
except AttributeError:
self.image = im
self.mask = None
self.text = text
self.cid = self.fig.canvas.mpl_connect('key_press_event', self)
def __call__(self, ev):
if ev.key != "\n" and ev.key != "enter":
return
if not (ev.xdata and ev.ydata):
return
x = np.clip(int(ev.xdata + 0.5), 0, self.image.shape[1] - 1)
y = np.clip(int(ev.ydata + 0.5), 0, self.image.shape[0] - 1)
str = "(%4d, %4d) %9.2f" % (x, y, self.image[y, x])
if hasattr(self, "mask") and self.mask is not None:
str += " 0x%02x" % (self.mask[y, x])
mval = self.mask[y, x]
for k, v in imageProc.maskPlanes.items():
if mval & v:
str += " %s" % k
self.text.set_text(str)
self.fig.canvas.draw()
|
|
import numpy as np
import sklearn.svm
import sklearn.ensemble
import sklearn.neighbors
import sklearn.decomposition
import sklearn.preprocessing
import sklearn.neural_network
import sklearn.linear_model
import sklearn.feature_extraction.text
import sklearn.naive_bayes
from hyperopt.pyll import scope, as_apply
from hyperopt import hp
from .vkmeans import ColumnKMeans
@scope.define
def sklearn_SVC(*args, **kwargs):
return sklearn.svm.SVC(*args, **kwargs)
@scope.define
def sklearn_LinearSVC(*args, **kwargs):
return sklearn.svm.LinearSVC(*args, **kwargs)
@scope.define
def sklearn_KNeighborsClassifier(*args, **kwargs):
star_star_kwargs = kwargs.pop('starstar_kwargs')
kwargs.update(star_star_kwargs)
return sklearn.neighbors.KNeighborsClassifier(*args, **kwargs)
@scope.define
def sklearn_RandomForestClassifier(*args, **kwargs):
return sklearn.ensemble.RandomForestClassifier(*args, **kwargs)
@scope.define
def sklearn_ExtraTreesClassifier(*args, **kwargs):
return sklearn.ensemble.ExtraTreesClassifier(*args, **kwargs)
@scope.define
def sklearn_SGDClassifier(*args, **kwargs):
return sklearn.linear_model.SGDClassifier(*args, **kwargs)
@scope.define
def sklearn_MultinomialNB(*args, **kwargs):
return sklearn.naive_bayes.MultinomialNB(*args, **kwargs)
@scope.define
def sklearn_PCA(*args, **kwargs):
return sklearn.decomposition.PCA(*args, **kwargs)
@scope.define
def sklearn_Tfidf(*args, **kwargs):
return sklearn.feature_extraction.text.TfidfVectorizer(*args, **kwargs)
@scope.define
def sklearn_StandardScaler(*args, **kwargs):
return sklearn.preprocessing.StandardScaler(*args, **kwargs)
@scope.define
def sklearn_MinMaxScaler(*args, **kwargs):
return sklearn.preprocessing.MinMaxScaler(*args, **kwargs)
@scope.define
def sklearn_Normalizer(*args, **kwargs):
return sklearn.preprocessing.Normalizer(*args, **kwargs)
@scope.define
def sklearn_OneHotEncoder(*args, **kwargs):
return sklearn.preprocessing.OneHotEncoder(*args, **kwargs)
@scope.define
def sklearn_BernoulliRBM(*args, **kwargs):
return sklearn.neural_network.BernoulliRBM(*args, **kwargs)
@scope.define
def sklearn_ColumnKMeans(*args, **kwargs):
return ColumnKMeans(*args, **kwargs)
@scope.define
def patience_param(x):
"""
Mark a hyperparameter as having a simple monotonic increasing
relationship with both CPU time and the goodness of the model.
"""
# -- TODO: make this do something!
return x
@scope.define
def inv_patience_param(x):
"""
Mark a hyperparameter as having a simple monotonic decreasing
relationship with both CPU time and the goodness of the model.
"""
# -- TODO: make this do something!
return x
def hp_bool(name):
return hp.choice(name, [False, True])
_svc_default_cache_size = 1000.0
def _svc_gamma(name):
# -- making these non-conditional variables
# probably helps the GP algorithm generalize
gammanz = hp.choice(name + '.gammanz', [0, 1])
gamma = hp.lognormal(name + '.gamma', np.log(0.01), 2.5)
return gammanz * gamma
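# The gammanz * gamma product makes the sampled gamma exactly 0 about half the
# time (which older sklearn releases interpreted as "use 1/n_features") and a
# lognormal draw otherwise, while keeping both hyperparameters unconditional
# in the search space.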
def _svc_max_iter(name):
return scope.patience_param(
scope.int(
hp.loguniform(
name + '.max_iter',
np.log(1e7),
np.log(1e9))))
def _svc_C(name):
return hp.lognormal(name + '.C', np.log(1000.0), 3.0)
def _svc_tol(name):
return scope.inv_patience_param(
hp.lognormal(
name + '.tol',
np.log(1e-3),
2.0))
def _random_state(name, random_state):
if random_state is None:
return hp.randint(name, 5)
else:
return random_state
def svc_linear(name,
C=None,
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
random_state=None,
cache_size=_svc_default_cache_size):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.svm.SVC model with a linear kernel.
"""
def _name(msg):
return '%s.%s_%s' % (name, 'linear', msg)
rval = scope.sklearn_SVC(
kernel='linear',
C=_svc_C(name + '.linear') if C is None else C,
shrinking=hp_bool(
_name('shrinking')) if shrinking is None else shrinking,
tol=_svc_tol(name) if tol is None else tol,
max_iter=_svc_max_iter(name) if max_iter is None else max_iter,
verbose=verbose,
random_state=_random_state(_name('.rstate'), random_state),
cache_size=cache_size,
)
return rval
def svc_rbf(name,
C=None,
gamma=None,
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
random_state=None,
cache_size=_svc_default_cache_size):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.svm.SVC model with an RBF kernel.
"""
def _name(msg):
return '%s.%s_%s' % (name, 'rbf', msg)
rval = scope.sklearn_SVC(
kernel='rbf',
C=_svc_C(name + '.rbf') if C is None else C,
gamma=_svc_gamma(name) if gamma is None else gamma,
shrinking=hp_bool(
_name('shrinking')) if shrinking is None else shrinking,
tol=_svc_tol(name + '.rbf') if tol is None else tol,
max_iter=(_svc_max_iter(name + '.rbf')
if max_iter is None else max_iter),
verbose=verbose,
cache_size=cache_size,
random_state=_random_state(_name('rstate'), random_state),
)
return rval
def svc_poly(name,
C=None,
gamma=None,
coef0=None,
degree=None,
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
random_state=None,
cache_size=_svc_default_cache_size):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.svm.SVC model with a polynomial kernel.
"""
def _name(msg):
return '%s.%s_%s' % (name, 'poly', msg)
# -- (K(x, y) + coef0)^d
coef0nz = hp.choice(_name('coef0nz'), [0, 1])
coef0 = hp.uniform(_name('coef0'), 0.0, 1.0)
poly_coef0 = coef0nz * coef0
rval = scope.sklearn_SVC(
kernel='poly',
C=_svc_C(name + '.poly') if C is None else C,
gamma=_svc_gamma(name + '.poly') if gamma is None else gamma,
coef0=poly_coef0 if coef0 is None else coef0,
degree=hp.quniform(
_name('degree'),
low=1.5,
high=8.5,
q=1) if degree is None else degree,
shrinking=hp_bool(
_name('shrinking')) if shrinking is None else shrinking,
tol=_svc_tol(name + '.poly') if tol is None else tol,
max_iter=(_svc_max_iter(name + '.poly')
if max_iter is None else max_iter),
verbose=verbose,
random_state=_random_state(_name('.rstate'), random_state),
cache_size=cache_size,
)
return rval
def svc_sigmoid(name,
C=None,
gamma=None,
coef0=None,
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
random_state=None,
cache_size=_svc_default_cache_size):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.svm.SVC model with a sigmoid kernel.
"""
def _name(msg):
return '%s.%s_%s' % (name, 'sigmoid', msg)
# -- tanh(K(x, y) + coef0)
coef0nz = hp.choice(_name('coef0nz'), [0, 1])
coef0 = hp.normal(_name('coef0'), 0.0, 1.0)
sigm_coef0 = coef0nz * coef0
rval = scope.sklearn_SVC(
kernel='sigmoid',
C=_svc_C(name + '.sigmoid') if C is None else C,
gamma=_svc_gamma(name + '.sigmoid') if gamma is None else gamma,
coef0=sigm_coef0 if coef0 is None else coef0,
shrinking=hp_bool(
_name('shrinking')) if shrinking is None else shrinking,
tol=_svc_tol(name + '.sigmoid') if tol is None else tol,
max_iter=(_svc_max_iter(name + '.sigmoid')
if max_iter is None else max_iter),
verbose=verbose,
random_state=_random_state(_name('rstate'), random_state),
cache_size=cache_size)
return rval
def svc(name,
C=None,
kernels=['linear', 'rbf', 'poly', 'sigmoid'],
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
random_state=None,
cache_size=_svc_default_cache_size):
svms = {
'linear': svc_linear(
name,
C=C,
shrinking=shrinking,
tol=tol,
max_iter=max_iter,
random_state=random_state,
verbose=verbose),
'rbf': svc_rbf(
name,
C=C,
shrinking=shrinking,
tol=tol,
max_iter=max_iter,
random_state=random_state,
verbose=verbose),
'poly': svc_poly(
name,
C=C,
shrinking=shrinking,
tol=tol,
max_iter=max_iter,
random_state=random_state,
verbose=verbose),
'sigmoid': svc_sigmoid(
name,
C=C,
shrinking=shrinking,
tol=tol,
max_iter=max_iter,
random_state=random_state,
verbose=verbose),
}
choices = [svms[kern] for kern in kernels]
if len(choices) == 1:
rval = choices[0]
else:
rval = hp.choice('%s.kernel' % name, choices)
return rval
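# Hedged example of inspecting a space built by svc() before running a search;
# hyperopt's stochastic sampler draws one concrete configuration from the pyll
# graph:
#
#   from hyperopt.pyll import stochastic
#   space = svc('my_svc')
#   print(stochastic.sample(space))   # an unfitted sklearn.svm.SVC instance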
# TODO: Some combinations of parameters are not allowed in LinearSVC
def liblinear_svc(name,
C=None,
loss=None,
penalty=None,
dual=None,
tol=None,
multi_class=None,
fit_intercept=None,
intercept_scaling=None,
class_weight=None,
random_state=None,
verbose=False):
def _name(msg):
return '%s.%s_%s' % (name, 'linear_svc', msg)
"""
The combination of penalty='l1' and loss='l1' is not supported
penalty='l2' and ploss='l1' is only supported when dual='true'
penalty='l1' is only supported when dual='false'
"""
loss_penalty_dual = hp.choice(_name('loss_penalty_dual'),
[('l1', 'l2', True),
('l2', 'l2', True),
('l2', 'l1', False),
('l2', 'l2', False)])
rval = scope.sklearn_LinearSVC(
C=_svc_C(name + '.liblinear') if C is None else C,
loss=loss_penalty_dual[0] if loss is None else loss,
penalty=loss_penalty_dual[1] if penalty is None else penalty,
dual=loss_penalty_dual[2] if dual is None else dual,
tol=_svc_tol(name + '.liblinear') if tol is None else tol,
multi_class=hp.choice(
_name('multi_class'),
['ovr', 'crammer_singer']) if multi_class is None else multi_class,
fit_intercept=hp.choice(
_name('fit_intercept'),
[True, False]) if fit_intercept is None else fit_intercept,
random_state=_random_state(_name('rstate'), random_state),
verbose=verbose,
)
return rval
# TODO: Pick reasonable default values
def knn(name,
sparse_data=False,
n_neighbors=None,
weights=None,
leaf_size=None,
metric=None,
p=None,
**kwargs):
def _name(msg):
return '%s.%s_%s' % (name, 'knn', msg)
if sparse_data:
metric_args = { 'metric':'euclidean' }
else:
metric_args = hp.pchoice(_name('metric'), [
(0.65, { 'metric':'euclidean' }),
(0.10, { 'metric':'manhattan' }),
(0.10, { 'metric':'chebyshev' }),
(0.10, { 'metric':'minkowski',
'p':scope.int(hp.quniform(_name('minkowski_p'), 1, 5, 1))}),
(0.05, { 'metric':'wminkowski',
'p':scope.int(hp.quniform(_name('wminkowski_p'), 1, 5, 1)),
'w':hp.uniform( _name('wminkowski_w'), 0, 100 ) }),
] )
rval = scope.sklearn_KNeighborsClassifier(
n_neighbors=scope.int(hp.quniform(
_name('n_neighbors'),
0.5, 50, 1)) if n_neighbors is None else n_neighbors,
weights=hp.choice(
_name('weights'),
['uniform', 'distance']) if weights is None else weights,
leaf_size=scope.int(hp.quniform(
_name('leaf_size'),
0.51, 100, 1)) if leaf_size is None else leaf_size,
starstar_kwargs=metric_args
)
return rval
# TODO: Pick reasonable default values
def random_forest(name,
n_estimators=None,
criterion=None,
max_features=None,
max_depth=None,
min_samples_split=None,
min_samples_leaf=None,
bootstrap=None,
oob_score=None,
n_jobs=1,
random_state=None,
verbose=False):
def _name(msg):
return '%s.%s_%s' % (name, 'random_forest', msg)
"""
Out of bag estimation only available if bootstrap=True
"""
bootstrap_oob = hp.choice(_name('bootstrap_oob'),
[(True, True),
(True, False),
(False, False)])
rval = scope.sklearn_RandomForestClassifier(
n_estimators=scope.int(hp.quniform(
_name('n_estimators'),
1, 50, 1)) if n_estimators is None else n_estimators,
criterion=hp.choice(
_name('criterion'),
['gini', 'entropy']) if criterion is None else criterion,
max_features=hp.choice(
_name('max_features'),
['sqrt', 'log2',
None]) if max_features is None else max_features,
max_depth=max_depth,
min_samples_split=hp.quniform(
_name('min_samples_split'),
1, 10, 1) if min_samples_split is None else min_samples_split,
min_samples_leaf=hp.quniform(
_name('min_samples_leaf'),
1, 5, 1) if min_samples_leaf is None else min_samples_leaf,
bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,
oob_score=bootstrap_oob[1] if oob_score is None else oob_score,
n_jobs=n_jobs,
random_state=_random_state(_name('rstate'), random_state),
verbose=verbose,
)
return rval
# TODO: Pick reasonable default values
# TODO: the parameters are the same as RandomForest, stick em together somehow
def extra_trees(name,
n_estimators=None,
criterion=None,
max_features=None,
max_depth=None,
min_samples_split=None,
min_samples_leaf=None,
bootstrap=None,
oob_score=None,
n_jobs=1,
random_state=None,
verbose=False):
def _name(msg):
return '%s.%s_%s' % (name, 'extra_trees', msg)
bootstrap_oob = hp.choice(_name('bootstrap_oob'),
[(True, True),
(True, False),
(False, False)])
rval = scope.sklearn_ExtraTreesClassifier(
n_estimators=scope.int(hp.quniform(
_name('n_estimators'),
1, 50, 1)) if n_estimators is None else n_estimators,
criterion=hp.choice(
_name('criterion'),
['gini', 'entropy']) if criterion is None else criterion,
max_features=hp.choice(
_name('max_features'),
['sqrt', 'log2',
None]) if max_features is None else max_features,
max_depth=max_depth,
min_samples_split=hp.quniform(
_name('min_samples_split'),
1, 10, 1) if min_samples_split is None else min_samples_split,
min_samples_leaf=hp.quniform(
_name('min_samples_leaf'),
1, 5, 1) if min_samples_leaf is None else min_samples_leaf,
bootstrap=bootstrap_oob[0] if bootstrap is None else bootstrap,
oob_score=bootstrap_oob[1] if oob_score is None else oob_score,
n_jobs=n_jobs,
random_state=_random_state(_name('rstate'), random_state),
verbose=verbose,
)
return rval
def sgd(name,
loss=None, #default - 'hinge'
penalty=None, #default - 'l2'
alpha=None, #default - 0.0001
l1_ratio=None, #default - 0.15, must be within [0, 1]
fit_intercept=None, #default - True
n_iter=None, #default - 5
shuffle=None, #default - False
random_state=None, #default - None
epsilon=None,
n_jobs=1, #default - 1 (-1 means all CPUs)
learning_rate=None, #default - 'invscaling'
eta0=None, #default - 0.01
power_t=None, #default - 0.5
class_weight=None,
warm_start=False,
verbose=False,
):
def _name(msg):
return '%s.%s_%s' % (name, 'sgd', msg)
rval = scope.sklearn_SGDClassifier(
loss=hp.pchoice(
_name('loss'),
[ (0.25, 'hinge'),
(0.25, 'log'),
(0.25, 'modified_huber'),
(0.05, 'squared_hinge'),
(0.05, 'perceptron'),
(0.05, 'squared_loss'),
(0.05, 'huber'),
(0.03, 'epsilon_insensitive'),
(0.02, 'squared_epsilon_insensitive') ] ) if loss is None else loss,
penalty=hp.pchoice(
_name('penalty'),
[ (0.40, 'l2'),
(0.35, 'l1'),
(0.25, 'elasticnet') ] ) if penalty is None else penalty,
alpha=hp.loguniform(
_name('alpha'),
np.log(1e-7),
np.log(1)) if alpha is None else alpha,
l1_ratio=hp.uniform(
_name('l1_ratio'),
0, 1 ) if l1_ratio is None else l1_ratio,
fit_intercept=hp.pchoice(
_name('fit_intercept'),
[ (0.8, True), (0.2, False) ]) if fit_intercept is None else fit_intercept,
learning_rate='invscaling' if learning_rate is None else learning_rate,
eta0=hp.loguniform(
_name('eta0'),
np.log(1e-5),
np.log(1e-1)) if eta0 is None else eta0,
power_t=hp.uniform(
_name('power_t'),
0, 1) if power_t is None else power_t,
n_jobs=n_jobs,
verbose=verbose,
)
return rval
def multinomial_nb(name,
alpha=None,
fit_prior=None,
):
def _name(msg):
return '%s.%s_%s' % (name, 'multinomial_nb', msg)
rval = scope.sklearn_MultinomialNB(
alpha=hp.quniform(
_name('alpha'),
0, 1, 0.001 ) if alpha is None else alpha,
fit_prior=hp.choice(
_name('fit_prior'),
[ True, False ] ) if fit_prior is None else fit_prior,
)
return rval
def any_classifier(name):
return hp.choice('%s' % name, [
svc(name + '.svc'),
knn(name + '.knn'),
random_forest(name + '.random_forest'),
extra_trees(name + '.extra_trees'),
sgd(name + '.sgd'),
])
def any_sparse_classifier(name):
return hp.choice('%s' % name, [
svc(name + '.svc'),
sgd(name + '.sgd'),
knn(name + '.knn', sparse_data=True),
multinomial_nb(name + '.multinomial_nb')
])
def pca(name, n_components=None, whiten=None, copy=True):
rval = scope.sklearn_PCA(
# -- qloguniform is missing a "scale" parameter so we
# lower the "high" parameter and multiply by 4 out front
n_components=4 * scope.int(
hp.qloguniform(
name + '.n_components',
low=np.log(0.51),
high=np.log(30.5),
q=1.0)) if n_components is None else n_components,
whiten=hp_bool(
name + '.whiten',
) if whiten is None else whiten,
copy=copy,
)
return rval
def standard_scaler(name, with_mean=None, with_std=None):
rval = scope.sklearn_StandardScaler(
with_mean=hp_bool(
name + '.with_mean',
) if with_mean is None else with_mean,
with_std=hp_bool(
name + '.with_std',
) if with_std is None else with_std,
)
return rval
def tfidf(name,
analyzer=None,
ngram_range=None,
stop_words=None,
lowercase=None,
max_df=1.0,
min_df=1,
max_features=None,
binary=None,
norm=None,
use_idf=False,
smooth_idf=False,
sublinear_tf=False,
):
def _name(msg):
return '%s.%s_%s' % (name, 'tfidf', msg)
max_ngram=scope.int( hp.quniform(
_name('max_ngram'),
1, 4, 1 ) )
rval = scope.sklearn_Tfidf(
stop_words=hp.choice(
_name('stop_words'),
[ 'english', None ] ) if stop_words is None else stop_words,
lowercase=hp_bool(
_name('lowercase'),
) if lowercase is None else lowercase,
max_df=max_df,
min_df=min_df,
binary=hp_bool(
_name('binary'),
) if binary is None else binary,
ngram_range=(1,max_ngram) if ngram_range is None else ngram_range,
norm=norm,
use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf,
)
return rval
def min_max_scaler(name, feature_range=None, copy=True):
if feature_range is None:
feature_range = (
hp.choice(name + '.feature_min', [-1.0, 0.0]),
1.0)
rval = scope.sklearn_MinMaxScaler(
feature_range=feature_range,
copy=copy,
)
return rval
def normalizer(name, norm=None):
rval = scope.sklearn_Normalizer(
norm=hp.choice(
name + '.norm',
['l1', 'l2'],
) if norm is None else norm,
)
return rval
def one_hot_encoder(name,
n_values=None,
categorical_features=None,
dtype=None):
rval = scope.sklearn_OneHotEncoder(
n_values='auto' if n_values is None else n_values,
categorical_features=('all'
if categorical_features is None
else categorical_features),
dtype=np.float if dtype is None else dtype,
)
return rval
def rbm(name,
n_components=None,
learning_rate=None,
batch_size=None,
n_iter=None,
verbose=False,
random_state=None):
rval = scope.sklearn_BernoulliRBM(
n_components=scope.int(
hp.qloguniform(
name + '.n_components',
low=np.log(0.51),
high=np.log(999.5),
q=1.0)) if n_components is None else n_components,
learning_rate=hp.lognormal(
name + '.learning_rate',
np.log(0.01),
np.log(10),
) if learning_rate is None else learning_rate,
batch_size=scope.int(
hp.qloguniform(
name + '.batch_size',
np.log(1),
np.log(100),
q=1,
)) if batch_size is None else batch_size,
n_iter=scope.int(
hp.qloguniform(
name + '.n_iter',
np.log(1),
np.log(1000), # -- max sweeps over the *whole* train set
q=1,
)) if n_iter is None else n_iter,
verbose=verbose,
random_state=_random_state(name + '.rstate', random_state),
)
return rval
def colkmeans(name,
n_clusters=None,
init=None,
n_init=None,
max_iter=None,
tol=None,
precompute_distances=True,
verbose=0,
random_state=None,
copy_x=True,
n_jobs=1):
rval = scope.sklearn_ColumnKMeans(
n_clusters=scope.int(
hp.qloguniform(
name + '.n_clusters',
low=np.log(1.51),
high=np.log(19.5),
q=1.0)) if n_clusters is None else n_clusters,
init=hp.choice(
name + '.init',
['k-means++', 'random'],
) if init is None else init,
n_init=hp.choice(
name + '.n_init',
[1, 2, 10, 20],
) if n_init is None else n_init,
max_iter=scope.int(
hp.qlognormal(
name + '.max_iter',
np.log(300),
np.log(10),
q=1,
)) if max_iter is None else max_iter,
tol=hp.lognormal(
name + '.tol',
np.log(0.0001),
np.log(10),
) if tol is None else tol,
precompute_distances=precompute_distances,
verbose=verbose,
random_state=random_state,
copy_x=copy_x,
n_jobs=n_jobs,
)
return rval
#XXX: todo GaussianRandomProjection
#XXX: todo SparseRandomProjection
def any_preprocessing(name):
"""Generic pre-processing appropriate for a wide variety of data
"""
return hp.choice('%s' % name, [
[pca(name + '.pca')],
[standard_scaler(name + '.standard_scaler')],
[min_max_scaler(name + '.min_max_scaler')],
[normalizer(name + '.normalizer')],
[],
# -- not putting in one-hot because it can make vectors huge
#[one_hot_encoder(name + '.one_hot_encoder')],
])
def any_text_preprocessing(name):
"""Generic pre-processing appropriate for text data
"""
return hp.choice('%s' % name, [
[tfidf(name + '.tfidf')],
])
def generic_space(name='space'):
model = hp.pchoice('%s' % name, [
(.8, {'preprocessing': [pca(name + '.pca')],
'classifier': any_classifier(name + '.pca_clsf')
}),
(.2, {'preprocessing': [min_max_scaler(name + '.min_max_scaler')],
'classifier': any_classifier(name + '.min_max_clsf'),
}),
])
return as_apply({'model': model})
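# Hedged usage sketch: the expression returned by generic_space() can be
# sampled directly or optimized with hyperopt's fmin given a user-supplied
# objective ('my_objective' below is a placeholder, not part of this module):
#
#   from hyperopt import fmin, tpe, Trials
#   space = generic_space()
#   # best = fmin(my_objective, space, algo=tpe.suggest,
#   #             max_evals=50, trials=Trials())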
# -- flake8 eof
|
|
#!/usr/bin/python
# Copyright 2012 William Yu
# [email protected]
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
#
"""
This is a demonstration file created to show how to obtain flow
and port statistics from OpenFlow 1.0-enabled switches. The flow
statistics handler contains a summary of web-only traffic.
"""
# standard includes
from pox.core import core
from pox.lib.util import dpidToStr
import pox.openflow.libopenflow_01 as of
from collections import defaultdict
# include as part of the betta branch
from pox.openflow.of_json import *
from pox.forwarding.my_l2_multi import Switch
time_period = 5 # time between stats requests
threshhold = 2000000 # = 3Mbit/s over 8 for all
paths = defaultdict(lambda:defaultdict(lambda:[]))
log = core.getLogger()
sws = {} # switches
#host_switch_pair = defaultdict(lambda:None)
# get paths from my_l2_multi module
def get_paths():
global sws
from pox.forwarding.my_l2_multi import switches
if sws != switches:
sws = switches
log.debug("NOT EQUAL - switches has changed since last time we checked")
# to do - > add some clearing for stats
else:
# log.debug("EQUAL")
pass
for sw in sws.values():
#log.debug("Switch %s, ports %s", dpidToStr(sw.dpid), sw.ports)
pass
global paths
from pox.forwarding.my_l2_multi import all_cooked_paths
if paths != all_cooked_paths:
paths = all_cooked_paths
log.debug("NOT EQUAL - paths has changed since last time we checked")
# to do - > add some clearing for stats
else:
log.debug("EQUAL - paths has not changed since last time we checked")
from pox.forwarding.my_l2_multi import path_map
global path_map
from pox.forwarding.my_l2_multi import host_switch_pair
global host_switch_pair
# When _handle_portstats_received receives stats for a port on a switch,
# it sends them here to be applied to the paths;
# the stats are the bytes sent by that port.
def apply_stats_to_paths(switch, port, stats):
# global paths
# log.debug("Checking switch %s port %s ", switch, port )
# for src in sws.values():
# for dst in sws.values():
# for path in paths[src][dst]:
# for switch_port_pair in path:
# #log.debug("switch-port pair %s, %s", dpidToStr(switch_port_pair[0].dpid), switch_port_pair[1] )
# if switch == dpidToStr(switch_port_pair[0].dpid) and port == switch_port_pair[1]:
# # log.debug("switch-port pair %s, %s", dpidToStr(switch_port_pair[0].dpid), switch_port_pair[1] )
# # log.debug(path)
# # switch_port_pair.append(stats) -> this isn't working, what is better?
# # to do -> how append stats?
# # print stats
# pass
from pox.forwarding.my_l2_multi import CookedPath
for cookedpathobj in CookedPath:
for switch_port_pair in cookedpathobj.cooked_path:
if switch == dpidToStr(switch_port_pair[0].dpid) and port == switch_port_pair[2]:
cookedpathobj.bytes_diff_list[cookedpathobj.cooked_path.index(switch_port_pair)] = \
stats - cookedpathobj.bytes_diff_list[cookedpathobj.cooked_path.index(switch_port_pair)]
# log.debug("Switch-port pair %s, %s", dpidToStr(switch_port_pair[0].dpid), switch_port_pair[2])
# log.debug("Bytes sent overall: %s", stats)
log.debug("Path: %s", cookedpathobj.cooked_path)
log.debug("Bytes diff list: %s", cookedpathobj.bytes_diff_list)
cookedpathobj.path_coefficient = max(cookedpathobj.bytes_diff_list[:-1])
# log.debug("Path coeff: %s", cookedpathobj.path_coefficient)
# handler for timer function that sends the requests to all the
# switches connected to the controller.
def _timer_func ():
get_paths()
for connection in core.openflow._connections.values():
connection.send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))
connection.send(of.ofp_stats_request(body=of.ofp_port_stats_request()))
log.debug("Sent %i flow/port stats request(s)", len(core.openflow._connections))
# handler to display flow statistics received in JSON format
# structure of event.stats is defined by ofp_flow_stats()
def _handle_flowstats_received (event):
stats = flow_stats_to_list(event.stats)
#log.debug("FlowStatsReceived from %s: %s",
# dpidToStr(event.connection.dpid), stats)
for flow_stats in event.stats:
# log.debug("Bytes in flow match%s: %s",
# flow_stats.match, flow_stats.byte_count)
# ALL THIS STILL HAS TO BE CHECKED!!! -> no duplications, add flow deleting after some time, etc.
# We want to gather stats for a flow only at the switch connected to the src host,
# to avoid duplication
if host_switch_pair[flow_stats.match.dl_src][0] == event.connection.dpid:
# log.debug("Flow stats found ", flow_stats.match.dl_src, host_switch_pair[flow_stats.match.dl_src], event.connection.dpid)
# Only IP flows
if flow_stats.match.dl_type == 0x800:
log.debug('IP Matched')
flow_match5 = [flow_stats.match.nw_proto, flow_stats.match.nw_src, flow_stats.match.nw_dst, \
flow_stats.match.tp_src, flow_stats.match.tp_dst]
from pox.forwarding.my_l2_multi import flow_list
for flow in flow_list:
#print "Flow match stat", flow_match5
#print "Flow match List", flow.match, "\n"
if flow.match5 == flow_match5:
log.debug("Flow 5 Match found")
if flow.changed == 1:
break # we only change path once
# TO DO -> handle timeouts, different switches etc.
# we want to take stats only from switch connected to host to avoid complications
flow.byte_diff = flow_stats.byte_count - flow.byte_count
log.debug("Bytes: received from stats %s, from this flow last checked %s, diff %s, bandwith in bits %s",
flow_stats.byte_count, flow.byte_count, flow.byte_diff, flow.byte_diff/time_period*8)
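# Worked example for the threshold check below: with time_period = 5 s, a flow
# that gained 1,250,000 bytes since the last poll is running at
# 1,250,000 / 5 * 8 = 2,000,000 bit/s, i.e. right at the threshhold above, so
# anything faster is treated as a big flow.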
flow.byte_count = flow_stats.byte_count
if flow.byte_diff/time_period*8 > threshhold:
log.debug("Uuuuuu, found big flow! %s", flow.match)
print "Sw src, sw dst: ", flow.switch_src, flow.switch_dst
intermediate = path_map[flow.switch_src][flow.switch_dst][1]
if intermediate is None:
print "Directly connected"
best_path = find_best_path(flow.switch_src, flow.switch_dst)
print "best path, flow path:"
print best_path, "\n", flow.path
if best_path != flow.path and best_path is not None:
print "\nPath of big flow is not the best path - moved!\n"
Switch.delete_path(sws[event.connection.dpid], flow.path, flow.match)
Switch._install_path(sws[event.connection.dpid], best_path, flow.match)
flow.path = best_path
flow.changed = 1
break
# handler to display port statistics received in JSON format
def _handle_portstats_received (event):
stats = flow_stats_to_list(event.stats)
# log.debug("PortStatsReceived from %s: %s",
# dpidToStr(event.connection.dpid), stats)
for f in event.stats:
if int(f.port_no)<65534:
apply_stats_to_paths(dpidToStr(event.connection.dpid), f.port_no, f.tx_bytes)
def find_best_path(src, dst):
best_path_coeff = None
best_path = None
from pox.forwarding.my_l2_multi import CookedPath
print "Cooked paths:"
for cookedpathobj in CookedPath:
if cookedpathobj.switch_src == src and cookedpathobj.switch_dst == dst:
print cookedpathobj.cooked_path
print cookedpathobj.bytes_diff_list, cookedpathobj.path_coefficient
if best_path_coeff is None:
best_path_coeff = cookedpathobj.path_coefficient
best_path = cookedpathobj.cooked_path
log.debug("Best path: %s, coeff: %s", best_path, best_path_coeff)
elif cookedpathobj.path_coefficient < best_path_coeff:
best_path_coeff = cookedpathobj.path_coefficient
best_path = cookedpathobj.cooked_path
log.debug("Best path: %s, coeff: %s", best_path, best_path_coeff)
return best_path
# main function to launch the module
def launch ():
from pox.lib.recoco import Timer
# attach handlers to listeners
core.openflow.addListenerByName("FlowStatsReceived",
_handle_flowstats_received)
core.openflow.addListenerByName("PortStatsReceived",
_handle_portstats_received)
# timer set to execute every five seconds
Timer(time_period, _timer_func, recurring=True)
|
|
# The one and only GameBot (tm)
#
# Written by SplatsCreations
# Date Written: 30th July 2009
# Email: [email protected]
#
# Uses twisted matrix from ----> http://twistedmatrix.com/
# if running on windows also needs the win32api module
# from ----> http://python.net/crew/mhammond/win32/
# but probably not needed on linux, though it hasn't been tested on linux yet.
#
# Developed using python version 2.6.5
# Written using Twisted version 8.2.0
# - if it doesn't work and you have a twisted version older than that,
# you will need to upgrade
#
from __future__ import absolute_import
VERSION = 0.95
SCRIPTURE_COLOUR = ''
from string import split
import string
# should be database agnostic - remove import
#import sqlite3
import random
from types import TupleType, UnicodeType
from time import clock
from bot.pluginDespatch import Plugin
from random import randint
from django.db import transaction
from django.db.models import Min, Max
from .models import GameGames, GameUsers, GameSolveAttempts
from bibleapp.models import BibleTranslations, BibleBooks, BibleVerses
import os
import re
import sys
import datetime
from optparse import OptionParser
# Process commands from the user. All messages directed to the bot
# end up here.
# Currently handles these commands:
# !games played
# - lists the games played in the database
# !restart
# - restart the currently selected game
# !join
# - join a multi-user game
# !start
# - start a multi-user game after everyone has joined
# !stop
# !end
# !endgame
# - stop a game that is in progress
import shutil
import logging
import logging.config
from django.conf import settings
from bibleapp.bot_plugin import get_book
logger = logging.getLogger(__name__)
logging.config.dictConfig(settings.LOGGING)
def blank_out_letters(letters, text):
""" Takes a scripture and blanks out text with underscores
except for letters"""
lowers = string.lowercase[0:26]
uppers = string.uppercase[0:26]
show_letters = ''
for ii in range(0,26):
ch = chr(ord('a') + ii)
if ch in letters:
show_letters += ch
else:
show_letters += '-'
for ii in range(0,26):
ch = chr(ord('A') + ii)
if ch.lower() in letters:
show_letters += ch
else:
show_letters += '-'
tr_table = string.maketrans(lowers+uppers, show_letters)
resp = string.translate(text, tr_table)
return resp
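# Worked example (the verse text is illustrative only):
#   blank_out_letters('aeiou', 'In the beginning God created')
# returns 'I- --e -e-i--i-- -o- --ea-e-': every letter that has not yet been
# given is replaced by a dash, and revealed letters keep their original case.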
class ScriptureChallenge(Plugin):
""" The class created by the factory class for handling non game specific
commands """
plugin = ('sg', 'Scripture Challenge Game')
def __init__(self, *args):
super(ScriptureChallenge, self).__init__(*args)
self.commands = (\
('games played', self.games_played, "Change this description"),
('challenge', self.challenge, "Change this description"),
('examine\s+(\d+)', self.examine, "Change this description"),
('join', self.join, "Change this description"),
('start with (?P<translation>\w+)$', self.start1, "Change this description"),
('start with (?P<translation>\w+) (?P<book>\w+)', self.start1, "Change this description"),
('stop|end|endgame', self.stop, "Change this description"),
('restart', self.restart, "Change this description"),
('hash', self.hash, "Change this description"),
)
self.servername = self.irc_conn.factory.network
self.rooms_hash = {}
def write_log (self, text):
logger.info(text)
def joined(self, chan):
logger.info("Scripture challenge joined %s" % (chan,))
self.rooms_hash[chan.lower()] = {}
rooms_hash = self.rooms_hash[chan.lower()]
rooms_hash['GameStarted'] = False
rooms_hash['NicksInGame'] = []
def userLeft(self, user, channel):
"""
Called when I see another user leaving a channel.
"""
self.write_log( "userLeft %s %s - deleting hash entry " % (channel, user))
room_hash = self.rooms_hash[channel.lower()]
NicksInGame = room_hash['NicksInGame']
if user in NicksInGame:
self.say(channel, user + " has left the game.")
idx = room_hash['NickCurrentTurn']
currTurnPlayer = NicksInGame[idx]
nextPlayer = currTurnPlayer
NicksInGame.remove(user)
if len(NicksInGame) == 0:
self.end_game(channel)
elif user != currTurnPlayer:
room_hash['NickCurrentTurn'] = NicksInGame.index(currTurnPlayer)
else:
# If it was the last player in the list that left and
# it was his turn then special case. Then we move to
# the first player.
if idx >= len(NicksInGame):
room_hash['NickCurrentTurn'] = 0
else:
pass
idx = room_hash['NickCurrentTurn']
nextPlayer = NicksInGame[idx]
self.say(channel, "Play moves to " + nextPlayer)
# If its not the last player we don't need to do anything
# as then when the list is reduced it moves to the next player
room_hash['current_user'] = nextPlayer
self.user_left(channel)
def userQuit(self, user, quitMessage):
"""
Called when I see another user disconnect from the network.
"""
pass
def userRenamed(self, oldname, newname):
"""
A user changed their name from oldname to newname.
"""
for room_hash in self.rooms_hash.values():
NicksInGame = room_hash['NicksInGame']
if oldname in NicksInGame:
idx = NicksInGame.index(oldname)
NicksInGame[idx] = newname
def show_server_hash(self, channel):
self.say(channel, "------ Server Hash ------")
for k in self.rooms_hash.keys():
self.say(channel, " " + k + " " + str(self.rooms_hash[k]))
self.say(channel, "-------------------------")
def reset_server_hash(self, channel):
""" Stop any game in its tracks and reset back to no game """
rooms_hash = self.rooms_hash[channel.lower()]
rooms_hash['NickUsing'] = None
rooms_hash['NicksInGame'] = []
rooms_hash['NickCurrentTurn'] = 0
rooms_hash['GameStarted'] = False
def start_game(self, channel, nickname):
""" Start or restart a game or a multi-user game """
if channel.lower() not in self.rooms_hash:
self.rooms_hash[channel.lower()] = {}
rooms_hash = self.rooms_hash[channel.lower()]
rooms_hash['NicksInGame'].append(nickname)
rooms_hash['letters_given'] = []
rooms_hash['currScrip'] = None
rooms_hash['explain'] = None
rooms_hash['round'] = 0
rooms_hash['nickUsing'] = None
rooms_hash['nicksInGame'] = []
rooms_hash['nickCurrentTurn'] = 0
rooms_hash['solve_state'] = None # to enable @list to work on startup
self.say(channel, "You are now playing Scripture Challenge ")
self.say(channel, ' ')
rooms_hash['active_game'].greeting(channel)
rooms_hash['GameStarted'] = False
self.say(channel, ' ')
self.say(channel, 'Type !join now to participate in this game')
self.say(channel, 'When everyone is ready type !start to begin')
def privmsg(self, user, chan, msg):
logger.debug("privmsg " + str((user, chan, msg)))
short_nick = split(user, '!')[0]
if chan.lower() in self.rooms_hash:
rooms_hash = self.rooms_hash[chan.lower()]
#
# Make sure a user can only talk to the game if it's their turn
# in a multi-user game
if rooms_hash['GameStarted']:
Exp_Idx = rooms_hash['NickCurrentTurn']
if Exp_Idx is None:
Expected_Nick = None
else:
Expected_Nick = rooms_hash['NicksInGame'][Exp_Idx]
if Expected_Nick is not None and short_nick.lower() == Expected_Nick.lower():
self.handle_command(chan, short_nick, msg)
else:
self.non_turn_based_command(chan, short_nick, msg)
def games_played(self, regex, chan, nick, **kwargs):
self.game_list(chan)
def challenge(self, regex, chan, nick, **kwargs):
self.rooms_hash[chan.lower()] = {}
rooms_hash = self.rooms_hash[chan.lower()]
rooms_hash['GameStarted'] = False
rooms_hash['NicksInGame'] = []
rooms_hash['NickCurrentTurn'] = 0
self.say(chan, "== Scripture Challenge Version %s ==" % (VERSION,))
self.say(chan, " -- Courtesy of SplatsCreations")
self.say(chan, " http://www.splats-world.pw/wp/chat/scripture-challenge-game/")
def examine(self, regex, chan, nick, **kwargs):
game_id = int(regex.group(1))
self.examine_game(chan, game_id)
def join(self, regex, chan, nick, **kwargs):
rooms_hash = self.rooms_hash[chan.lower()]
if not rooms_hash['GameStarted']:
# It is only meaningful to join a game once.
if not nick.lower() in map(lambda x: x.lower(), rooms_hash['NicksInGame']):
rooms_hash['NicksInGame'].append(nick)
self.say(chan, nick + ' has joined game')
else:
self.say(chan, nick + ' has already joined game')
else:
self.say(chan, nick + ', game has already started, cannot join.')
def start1(self, regex, chan, nick, **kwargs):
translation = regex.group('translation')
try:
trans = BibleTranslations.objects.get(name = translation)
except BibleTranslations.DoesNotExist:
self.say(chan, "Translation {} not known".format(translation))
return
try:
book_name = regex.group('book')
book_name = get_book(translation, book_name)
except IndexError:
book_name = None
if book_name:
book = BibleBooks.objects.get(trans = trans, canonical = book_name)
verse_range_data = BibleVerses.objects.filter(trans = trans, book=book).aggregate(Min('id'), Max('id'))
else:
verse_range_data = BibleVerses.objects.filter(trans = trans).aggregate(Min('id'), Max('id'))
v1 = verse_range_data['id__min']
v2 = verse_range_data['id__max']
rooms_hash = self.rooms_hash[chan.lower()]
if 'NicksInGame' not in rooms_hash or \
len(rooms_hash['NicksInGame']) == 0:
self.say(chan, 'No one has yet joined game.')
elif not rooms_hash['GameStarted']:
NicksInGame = rooms_hash['NicksInGame']
if 'NickCurrentTurn' in rooms_hash:
NickCurrentTurn = rooms_hash['NickCurrentTurn']
else:
rooms_hash['NickCurrentTurn'] = 0
NickCurrentTurn = 0
CurrNick = NicksInGame[NickCurrentTurn]
NickCurrentTurn = 0
game = self._create_game(NicksInGame)
self.say(chan, 'Game started...')
self.say(chan, ' ')
rooms_hash['game'] = game
rooms_hash['current_user'] = CurrNick
rooms_hash['Round'] = 0
rooms_hash['GameStarted'] = True
rooms_hash['NickCurrentTurn'] = NickCurrentTurn
self.start(chan, v1, v2)
else:
self.say(chan, 'Game already started')
def stop(self, regex, chan, nick, **kwargs):
self.end_game(chan)
def restart(self, regex, chan, nick, **kwargs):
self.start_game(chan, nick)
def hash(self, regex, chan, nick, **kwargs):
self.show_server_hash(chan)
def _create_game(self, NicksInGame):
# Record the new game to the database
with transaction.atomic():
game = GameGames()
game.save()
for nick in NicksInGame:
host = self.irc_conn.nicks_db.get_host(nick)
gu = GameUsers(game = game, nick = nick, host = host)
gu.save()
return game
def advance_user(self, chan):
""" callback from game module in multiuser game to advance to next user
in a multi user game """
rooms_hash = self.rooms_hash[chan.lower()]
NicksInGame = rooms_hash['NicksInGame']
NumNicks = len(rooms_hash['NicksInGame'])
NickCurrentTurn = rooms_hash['NickCurrentTurn']
NickCurrentTurn += 1
if NickCurrentTurn >= NumNicks:
NickCurrentTurn = 0
rooms_hash['Round'] += 1
rooms_hash['NickCurrentTurn'] = NickCurrentTurn
CurrNick = NicksInGame[NickCurrentTurn]
logger.debug ( "advance user - next nick = "+ CurrNick)
rooms_hash['current_user'] = CurrNick
def end_game(self, channel, win=False):
""" callback for when the game has come to a conclusion
(ie win or lose). When a game finishes we want to stop
stuff from happening. """
rooms_hash = self.rooms_hash[channel.lower()]
if rooms_hash['GameStarted']:
self.say(channel, "Game Finished.")
game = rooms_hash['game']
# Find the winner if any
if win:
currTurn = rooms_hash['NickCurrentTurn']
nick = rooms_hash['NicksInGame'][currTurn]
winner = GameUsers.objects.get(game=game, nick=nick)
else:
winner = None
with transaction.atomic():
game.winner = winner
game.num_rounds = rooms_hash['Round']
game.save()
self.reset_server_hash(channel)
def send_reply(self, channel, msg):
""" short hand method for sending message to channel """
self.say(channel, msg)
def examine_game(self, channel, game_id):
""" examine the game """
try:
game = GameGames.objects.get(id = game_id)
except GameGames.DoesNotExist:
self.say(channel, "Game not in database")
return
usrs = [ user.nick for user in game.gameusers_set.all() ]
dt = game.timestamp
dstr = dt.strftime("%d-%b-%Y %I:%M%p")
format_s = "[%s] %s ref=\"%s\" rounds = %s players = [ %s ]" % \
(game.id, dstr, game.ref, game.num_rounds, ", ".join(usrs))
format_s = str(format_s)
self.say(channel, format_s)
attempts = [attempt for attempt in game.gamesolveattempts_set.all()]
for attempt in attempts:
fmt = str(" [%s] %s" % (attempt.id, attempt.user.nick))
self.say(channel, fmt)
self.say(channel, " attempt = \"%s\"" % str(attempt.attempt))
self.say(channel, " result = " + str(attempt.reason))
def game_list(self, channel):
""" !games played - list all games that the bot knows about """
games = GameGames.objects.order_by('-timestamp').all()[:5]
for game in games:
dt = game.timestamp
usrs = [ user.nick for user in game.gameusers_set.all() ]
dstr = dt.strftime("%d-%b-%Y %I:%M%p")
if game.winner:
format_s = "[%s] %s ref=\"%s\" rounds=%s winner=%s players=[ %s ]" % \
(game.id, dstr, game.ref, game.num_rounds, game.winner.nick,
", ".join(usrs))
else:
format_s = "[%s] %s ref=\"%s\" rounds = %s players=[ %s ]" % \
(game.id, dstr, game.ref, game.num_rounds, ", ".join(usrs))
format_s = str(format_s)
self.say(channel, format_s)
usr_list = str(" User list : %s " % ", ".join(usrs))
self.say(channel, usr_list)
self.say(channel, "--- end of list ---")
def greeting(self, channel):
""" Called when the game is first started """
self.say(channel, \
"\x032This is the game of scripture challenge. This is a multi user game " + \
"but can also be played as a single user game. " + \
"The object is to guess the scripture. (Its a little bit like hangman " + \
"and a bit like the TV show Jeopardy) " + \
"Each player has turns at choosing a letter and after each letter has " + \
"the option of solving the scripture.")
self.say(channel, " ")
self.say(channel, \
"\x032 Type !redisplay to re-display the scripture at any time. " + \
"This may be useful if the scripture has been scrolled off the " + \
"top of the window. ")
def _display_scripture(self, channel):
rooms_hash = self.rooms_hash[channel.lower()]
verse = rooms_hash['currScrip'].verse_text.encode("utf8", "replace")
script_disp = blank_out_letters(rooms_hash['letters_given'],
verse)
self.say(channel, "The scripture as it currently stands:")
self.say(channel, SCRIPTURE_COLOUR + script_disp)
self.say(channel, rooms_hash['current_user'] + ', choose a letter by typing !<letter> where letter is a to z')
def start(self, channel, begin_id, end_id):
""" Called when the multi user game is ready to begin
and all users have signed up """
rooms_hash = self.rooms_hash[channel.lower()]
rooms_hash['explain'] = None
random_scripture = BibleVerses.objects.filter(id__gte = begin_id, id__lt = end_id).order_by("?").first()
ref = "{} {}:{}".format(random_scripture.book.long_book_name,
random_scripture.chapter,
random_scripture.verse)
text = random_scripture.verse_text
logger.info('Scripture chosen : ' + ref + "," + text)
rooms_hash['currScrip'] = random_scripture
rooms_hash['currScrip'].ref = ref # slightly hacky
tr_table = string.maketrans('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', '-' * (26*2))
rooms_hash['letters_given'] = []
game = rooms_hash['game']
game.ref = ref
game.scripture = text
game.num_rounds = 0
game.save()
self._display_scripture(channel)
rooms_hash['solve_state'] = None
def handle_command(self, channel, nick, command):
rooms_hash = self.rooms_hash[channel.lower()]
short_nick = nick.split('!')[0]
letter_mch = re.match('^![a-z]\s*$|^[a-z]\s*$', command, re.IGNORECASE)
if letter_mch:
if letter_mch.group(0)[0] == '!':
letter = letter_mch.group(0)[1].lower()
else:
letter = letter_mch.group(0)[0].lower()
if letter in rooms_hash['letters_given']:
self.say(channel, "This letter has already been chosen.")
self.say(channel, rooms_hash['current_user'] + ', Please try again.')
else:
rooms_hash['letters_given'].append(letter)
logger.debug('letters given = ' + str(rooms_hash['letters_given']))
verse = rooms_hash['currScrip'].verse_text.encode("utf8", "replace")
script_disp = blank_out_letters(rooms_hash['letters_given'], verse)
self.say(channel, "The scripture as it currently stands:")
self.say(channel, SCRIPTURE_COLOUR+script_disp)
self.say(channel, rooms_hash['current_user'] + ', Do you wish to solve? (Please type a yes or no answer)')
rooms_hash['solve_state'] = "yes_no_answer"
elif rooms_hash['solve_state'] == "yes_no_answer":
yes_no_mch = re.match('^yes$|^no', command, re.IGNORECASE)
if yes_no_mch:
resp = yes_no_mch.group(0).lower()
if resp == "no":
self.advance_user(channel)
rooms_hash['letters_given'].sort()
letters_given = str(rooms_hash['letters_given'])
self.say(channel, "Letters currently used are : " + letters_given)
self.say(channel, rooms_hash['current_user'] + ', choose a letter by typing !<letter> where letter is a to z')
rooms_hash['solve_state'] = None
else:
self.say(channel, "Type your answer being sure to get spelling correct")
rooms_hash['solve_state'] = "solve_response"
elif rooms_hash['solve_state'] == "solve_response":
rooms_hash['solve_state'] = None
# find the current nick by matching on nick in game_users
# in database. In future maybe use host name mask
nick = rooms_hash['current_user']
game = rooms_hash['game']
user = GameUsers.objects.get(game = game, nick = nick)
with transaction.atomic():
# If user hasn't renamed their nick (oh well)
# (If they didn't we won't bother recording their attempt,
# may change in future.)
reason_str = ""
gsa = GameSolveAttempts(game = game, user=user,
attempt = command.strip())
user_words = re.split('\s+', command.strip())
scrip_words = re.split('\s+',rooms_hash['currScrip'].verse_text.strip())
uwl = len(user_words)
swl = len(scrip_words)
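# The solve check that follows accepts the attempt only when it has exactly as
# many words as the verse and, for each word, every letter of the verse word
# appears in order (case-insensitively) in the corresponding attempt word;
# punctuation on either side is skipped rather than compared.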
if uwl == swl: # If we have the right number of words for the verse
scripMatch = True
for ii, wrd in enumerate(scrip_words):
uword = user_words[ii]
iii = 0
isMatch = True
for ch in wrd:
if ch.isalpha():
while iii < len(uword):
if uword[iii].isalpha(): break
iii += 1
if (iii == len(uword)):
isMatch = False
break
if ch.lower() != uword[iii].lower():
isMatch = False
break
else:
iii += 1
if not isMatch:
scripMatch = False
reason_str = "'%s' != '%s'" % (uword, wrd)
break
if scripMatch:
reason_str = "Correctly solved"
self.say(channel, "Well done you have correctly solved the scripture")
self.say(channel, rooms_hash['currScrip'].verse_text)
self.say(channel, rooms_hash['currScrip'].ref)
self.end_game(channel, win=True)
else:
self.say(channel, "Sorry, your attempt at solving did not succeed")
# try this
if len(rooms_hash['NicksInGame']) > 1:
self.advance_user(channel)
self._display_scripture(channel)
rooms_hash['solve_state'] = None
else:
# end of try
self.say(channel, rooms_hash['currScrip'].verse_text)
self.say(channel, rooms_hash['currScrip'].ref)
self.end_game(channel)
else:
reason_str = "word count mismatch"
if len(rooms_hash['NicksInGame']) > 1:
self.say(channel, "Sorry, your attempt at solving did not succeed")
self.advance_user(channel)
self._display_scripture(channel)
rooms_hash['solve_state'] = None
else:
# end of try
self.say(channel, "Sorry, you did not have the correct number of words")
self.say(channel, rooms_hash['currScrip'].verse_text)
self.say(channel, rooms_hash['currScrip'].ref)
self.say(channel, "Number of your words = %d " % uwl)
self.say(channel, "Number of words in scripture = %d " % swl)
self.end_game(channel)
gsa.reason = reason_str
gsa.save()
def non_turn_based_command(self, channel, nick, command):
rooms_hash = self.rooms_hash[channel.lower()]
repost_mch = re.match('!repost|!redisplay', command, re.IGNORECASE)
explain_mch = re.match('!explain', command, re.IGNORECASE)
short_nick = nick.split('!')[0]
if repost_mch:
if not rooms_hash['currScrip']:
self.say(channel, "There is no current scipture to display.")
else:
script_disp = blank_out_letters(rooms_hash['letters_given'],
str(rooms_hash['currScrip'].verse_text.encode("utf8", "replace")))
self.say(channel, "The scripture as it currently stands:")
self.say(channel, SCRIPTURE_COLOUR+script_disp)
self.say(channel, "Letters currently used are : " + str(rooms_hash['letters_given']))
self.say(channel, "It is " + rooms_hash['current_user'] + "'s turn.")
if not rooms_hash['solve_state']:
self.say(channel, "Waiting for a letter to be chosen.")
elif rooms_hash['solve_state'] == "yes_no_answer":
self.say(channel, "Waiting for a yes/no answer in solving.")
elif rooms_hash['solve_state'] == "solve_response":
self.say(channel, "Waiting for the scripture solution to be type in.")
self.say(channel, "CAUTION: Anything you type in chat may be mistaken as a response.")
elif explain_mch:
if not rooms_hash['explain']:
self.say(channel, "Nothing to explain!")
else:
self.say(channel, "User Words " + rooms_hash['explain'][0][0])
self.say(channel, "Scripture Words " + rooms_hash['explain'][0][1])
if len(rooms_hash['explain']) > 1:
for elmt in rooms_hash['explain'][1:]:
self.say(channel, elmt[1] + elmt[0] + elmt[2])
self.say(channel, "End of explanation.")
def user_left(self,channel):
""" Called if a user in game leaves the game """
rooms_hash = self.rooms_hash[channel.lower()]
rooms_hash['solve_state'] = None
if rooms_hash['GameStarted']:
self._display_scripture(channel)
|
|
#from .model_structure import dgraphModel, dgraphTypes, dgraphUID, dgraphID, dgraphString, dgraphInt, dgraphFloat, dgraphBool, dgraphGeo, dgraphDate
from .flags_structure import dgraphFlags
from textwrap import dedent
import random
import hashlib
import time
import copy
# Generate Random
SECRET_KEY = 'CC9S5AIiEtFvqn1Rg3YxryaVVmvxDWLecUVq94BezrwcwY25MT'
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyz''ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
if not using_sysrandom:
random.seed(
hashlib.sha256(
("%s%s%s" % (
random.getstate(),
time.time(),
SECRET_KEY)).encode('utf-8')
).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
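# Illustrative sketch (not part of the original module, assumed usage):
# get_random_string draws `length` characters uniformly from `allowed_chars`,
# reseeding the Mersenne Twister from SECRET_KEY when SystemRandom is not
# available, e.g.
#
#     token = get_random_string(16)                     # 16-char random token
#     blank_node = "_:{0}".format(get_random_string())  # blank-node style id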
def _name(cls):
if hasattr(cls, "__name__"):
return cls.__name__
else:
return cls.__class__.__name__
# Tasks: Storing data and properties, generating schema and normal output
# DgraphTypes
class dgraphTypes():
validFlags = ["index", "reverse", "schema", "require"]
validTypes = {t: "<class '{0}'>".format(
t) for t in ["str", "int", "float", "bool"]}
validSchemaTypes = [
"string", "int", "float", "bool", "id", "date", "geo", "uid"]
def __init__(self, *args, **kwargs):
# Initiation Flags
flags = {f: False for f in self.validFlags}
flags["default"] = None
for k in list(kwargs):
if k in self.validFlags and kwargs[k] == True:
flags[k] = True
if "default" in kwargs:
flags["default"] = kwargs["default"]
self.requireSchema = any(
[value for key, value in flags.items() if key != "require"])
# print(flags["reverse"])
self.flags = type('flags', (object,), flags)
self.qflags = None
self.varName = None
self.setget = None
self.prefix = ""
@classmethod
def invalidType(cls, typ, input, requestType=None):
if not str(type(input)) == cls.validTypes[typ] and not str(type(input)) == "<class 'tuple'>" and not (requestType=="delete" and (input == "*" or input == None)):
raise BaseException("Invalid type bypassed")
else:
cls.type = typ
def setValue(self, uid, name, value, requestType=None):
self.invalidType(self.vtype, value, requestType)
new = copy.copy(self)
new.uid = uid
new.name = name
new._value = value
new.type = requestType
new.setget = 0
return new
def queryValue(self, name, qflags=None, requestType=None):
new = copy.copy(self)
new.name = name
new.qflags = qflags
new.type = requestType
new.setget = 1
return new
def setVar(self, name=None):
if name == None:
name = get_random_string()
self.varName = name
return self
#Input#
def _prechcheckVar(self, var):
if self.type == "delete" and var == None:
return "*"
else:
return '"{0}"'.format(var)
def _genSingleInput(self, uid, name, value):
if self.varName != None:
return "{3} AS {0} <{1}> {2}".format(uid, name, value, self.varName)
else:
return "{0} <{1}> {2}".format(uid, name, value)
####Parser#####
def _genSchema(self, name, vtype, flags):
return "{0}: {1} {2}".format(name, vtype, flags)
def _genInput(self):
uid = self.uid
name = "{0}{1}".format(self.prefix, self.name)
value = self._value
vtype = self.vtype
stype = self.schemaType
vset = []
if isinstance(self._value, (tuple)):
if isinstance(self._value[0], (tuple)):
for t in self._value:
vset.append(
self._genSingleInput(uid, name, '''{0}@{1}'''.format(self._prechcheckVar(t[0]), t[1])))
else:
vset.append(
self._genSingleInput(uid, name, '''{0}@{1}'''.format(self._prechcheckVar(value[0]), value[1])))
elif value.__class__.__bases__[0].__name__ == "dgraphModel":
vset.append(
self._genSingleInput(uid, name, '''{0}'''.format(value._uid)))
else:
vset.append(
self._genSingleInput(uid, name, '''{0}'''.format(self._prechcheckVar(value))))
if self.requireSchema == True:
_index = getattr(self.flags, "index")
_flags = []
if _index != False:
if self.schemaType in ("string", "id"):
if _index == True:
_tp = "term"
else:
_tp = _index
else:
if _index == True:
_tp = self.schemaType
else:
_tp = _index
_flags.append("index({0})".format(_tp))
_flags += [v for v in ["reverse"]
if getattr(self.flags, v) == True]
_flags = " ".join(
["@{0}".format(v) for v in _flags])
schema = self._genSchema(name, stype, _flags)
return (".\n".join(vset), schema)
else:
return (".\n".join(vset), None)
#Output#
def _genOutput(self):
_querySub = '''{0} {1}{{
{2}
}}'''
_querySubVar = '''{3} AS {0} {1}{{
{2}
}}'''
_queryPart = '''{0} {1}'''
_queryPartVar = '''{2} AS {0} {1}'''
# ToDo: Input generation for other models
# if val.__class__.__bases__[0].__name__ == "dgraphTypes":
# varName = value.__bases__._regVar(val.__name__)
# value = callableM.setValue(uid, name, 'var({0})'.format(varName), vtype)
# self._set.append(value)
flags = self.qflags
if flags != None:
flags._prefix = self.prefix
stype = self.schemaType
name = "{0}{1}".format(self.prefix, self.name)
varName = self.varName
_inner = []
if flags == None:
flags = ""
if stype == 'uid':
if flags._sub == []:
raise "No child query properties defined"
_sub = "\n".join([s.gen for s in flags.sub])
if varName != None:
_inner.append(_querySubVar.format(name, flags, _sub, varName))
else:
_inner.append(_querySub.format(name, flags, _sub))
else:
if varName != None:
_inner.append(_queryPartVar.format(name, flags, varName))
else:
_inner.append(_queryPart.format(name, flags))
return "\n".join(_inner)
@property
def gen(self):
if self.setget == 0:
return self._genInput()
else:
return self._genOutput()
class dgraphUID(dgraphTypes):
schemaType = "uid"
vtype = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
def setValue(self, uid, name, value, requestType=None):
if not _name(value.__class__.__base__) == "dgraphModel":
raise BaseException("Invalid type bypassed")
new = copy.copy(self)
new.uid = uid
new.name = name
new._value = value
new.type = requestType
new.setget = 0
return new
class dgraphID(dgraphTypes):
schemaType = "id"
vtype = "int"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class dgraphString(dgraphTypes):
schemaType = "string"
vtype = "str"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
class dgraphPassword(dgraphTypes):
schemaType = "string"
vtype = "str"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def value(self):
return '''"{0}"^^<pwd:password>'''.format(self._value)
class dgraphInt(dgraphTypes):
schemaType = "int"
vtype = "int"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
class dgraphFloat(dgraphTypes):
schemaType = "float"
vtype = "float"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
class dgraphBool(dgraphTypes):
schemaType = "bool"
vtype = "bool"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
class dgraphGeo(dgraphTypes):
schemaType = "geo"
vtype = "str"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
class dgraphDate(dgraphTypes):
schemaType = "date"
vtype = "str"
def __init__(self, *args, **kwargs):
super().__init__( *args, **kwargs)
# Model generator
class dgraphModel():
disableSchema = False
    validModelTypes = [_name(m) for m in [dgraphUID, dgraphID, dgraphString,
dgraphInt, dgraphFloat, dgraphBool, dgraphGeo, dgraphDate, dgraphPassword]]
pushTypes = ("set", "delete")
pullTypes = ("query", "recurse", "var")
def __init__(self, *args, **kwargs):
self.modelName = str(self.__class__.__name__)
self.meta = type('metaType', (object,), {})
if hasattr(self, '__meta__'):
self.__meta__()
def _name(self, name):
return name
##################
#dgraphModel.push#
##################
def _push(self, vtype, *args, **kwargs):
if hasattr(self.meta, "staticID"):
uid = "<{0}>".format(self.meta.staticID)
skipRequire = True
else:
if "uid" in kwargs:
uid = "<{0}>".format(kwargs["uid"])
skipRequire = True
            else:
                uid = "_:{0}".format(get_random_string())
                skipRequire = False
self._uid = uid
if not hasattr(self, "_set"):
self._set = []
_set = []
for name in dir(self):
callableM = getattr(self, name)
if callableM.__class__.__bases__[0].__name__ == "dgraphTypes":
if name in kwargs:
callName = _name(callableM)
if callName in self.validModelTypes:
val = kwargs[name]
value = callableM.setValue(
uid, self._name(name), val, vtype)
_set.append(value)
elif callableM.flags.require == True and skipRequire != True:
if callableM.flags.default == None:
raise Exception("Value {0} required.".format(name))
else:
val = callableM.flags.default
value = callableM.setValue(uid, self._name(name), val, vtype)
_set.append(value)
self._set.append(
type('dgraphPushData', (object,), {"type": vtype, "data": _set, "additional": {"root": self.meta.root, "model":self.modelName, "uid":uid}}))
return self
@classmethod
def set(cls, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
self.__init__()
resp = self._push("set", *args, **kwargs)
return resp
@classmethod
def delete(cls, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
self.__init__()
resp = self._push("delete", *args, **kwargs)
return resp
@classmethod
def uid(cls, uid):
        self = cls.__new__(cls)
self.__init__()
self._uid = uid
return self
##################
#dgraphModel.pull#
##################
def _pull(self, vtype, start, *args, **kwargs):
self.__init__()
if not hasattr(self, "_set"):
self._set = []
if hasattr(self.meta, "staticID"):
start = dgraphFlags.init(start.name, id=self.meta.staticID)
_getall = ('*' in args)
_set = []
for name in dir(self):
if name in kwargs:
callableM = getattr(self, name)
flags = kwargs[name]
_set.append(
callableM.queryValue(flags._name(name), flags, vtype))
elif name in args:
callableM = getattr(self, name)
_set.append(
callableM.queryValue(self._name(name), None, vtype))
elif _getall == True and getattr(self, name).__class__.__bases__[0].__name__ == "dgraphTypes":
callableM = getattr(self, name)
_set.append(
callableM.queryValue(self._name(name), None, vtype))
self._set.append(
type('dgraphPullData', (object,), {"type": vtype, "data": _set, "start": start, "additional": {"model":self.modelName, "root": self.meta.root}}))
return self
@classmethod
def query(cls, responceStart, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
return self._pull("query", responceStart, *args, **kwargs)
@classmethod
def recurse(cls, iterStart, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
return self._pull("recurse", iterStart, *args, **kwargs)
@classmethod
    def var(cls, varStart, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs)
return self._pull("var", varStart, *args, **kwargs)
def generate(self, alternativeSet=None):
_set = alternativeSet or self._set
_itSet = []
_flagParms = ["index", "reverse", "schema"]
_getSection = '''{0}({1}){{
{2}
}}'''
_getBasic = '''
{
%s
}
'''
_setBasicnSchema = '''
mutation {
schema {
%s .
}
%s{
%s .
}
}
'''
_setBasic = '''
mutation {
%s{
%s .
}
}
'''
# List generation
for s in _set:
stype = s.type
data = s.data
if stype in self.pushTypes:
for d in data:
d.prefix = self.modelName + "_"
_itSet.append((stype, *d.gen))
elif stype in self.pullTypes:
_inner = []
s.start._prefix = self.modelName + "_"
sflags = s.start.flags
if stype == "recurse":
sname = "recurse"
else:
sname = s.start.name
for d in data:
d.prefix = self.modelName + "_"
_inner.append(d.gen)
_itSet.append(
(stype, _getSection.format(sname, sflags, "\n".join(_inner)), None))
if s.additional["root"] != None:
if s.__name__ == "dgraphPullData":
pass
elif s.__name__ == "dgraphPushData":
_rootParts = reversed(s.additional["root"].split("."))
_link = self
for idx, _p in enumerate(_rootParts):
_sp = _p.split(":")
if _sp[0] == "<*>":
_uid = "_:{0}".format(get_random_string())
_link = dgraphUID().setValue(
_uid, _sp[1], _link, stype)
else:
_link = dgraphUID().setValue(
"<{0}>".format(_sp[0]), _sp[1], _link, stype)
_itSet.append((stype, *_link.gen))
_itSet.append((None, None, None))
_set = []
_schema = []
_last = _itSet[0][0]
_generator = ""
# List to Output
for idx, (t, s, sc) in enumerate(_itSet):
if t != _last:
# Set
if _last in ("set", "delete"):
if len(_schema) > 0 and not self.disableSchema == True:
_generator += dedent(_setBasicnSchema %
(" .\n".join(_schema), _last, " .\n".join(_set)))
else:
_generator += dedent(_setBasic %
(_last, " .\n".join(_set)))
# Query
elif _last in ("query", "recurse"):
_generator += dedent(_getBasic % ("\n".join(_set)))
if sc != None:
_schema = [sc]
else:
_schema = []
_set = [s]
_last = t
else:
if sc != None:
_schema.append(sc)
_set.append(s)
return _generator
def join(self, *args):
_self = copy.copy(self)
_set = copy.copy(_self._set)
for a in args:
_set += a._set
_self._set = _set
return _self
def __str__(self):
return self.generate()
def __unicode__(self):
return self.__str__()
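# Minimal usage sketch (an assumption about the intended API, not taken from
# the original sources; the exact dgraphFlags interface and meta attributes
# may differ):
#
#     class Person(dgraphModel):
#         name = dgraphString(index=True, require=True)
#         age = dgraphInt()
#         def __meta__(self):
#             self.meta.root = None
#
#     set_mutation = str(Person.set(name="Ada", age=36))   # mutation + schema
#     query = str(Person.query(dgraphFlags.init("people"), "name", "age"))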
#UniqueIDGenerator
class idGen():
def __init__(self, prefix, idlength = 8):
self.secret = SECRET_KEY
self.idlength = idlength
self.prefix = str(prefix)
self.allowed_chars='abcdefghijklmnopqrstuvwxyz''ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
def __call__(self, uniquifier):
if not isinstance(uniquifier, list):
uniquifier = [uniquifier]
return self.transform(self.prefix, uniquifier)
def transform(self, prefix, ident):
sha256key = [i for i in hashlib.sha256('{0}_{1}{2}'.format(prefix, ".".join([str(i) for i in ident]), self.secret).encode('utf-8')).hexdigest() if i in self.allowed_chars]
sha256prefix = [i for i in hashlib.sha256('{0}_{1}'.format(prefix, self.secret).encode('utf-8')).hexdigest() if i in self.allowed_chars]
return '{0}.{1}'.format("".join(sha256prefix[:4]), "".join(sha256key[:self.idlength]))
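# Usage sketch (assumed, for illustration only): idGen derives a deterministic,
# prefix-scoped identifier from SECRET_KEY plus one or more "uniquifier" values.
#
#     make_user_id = idGen("user", idlength=8)
#     make_user_id("alice@example.com")         # e.g. 'abcd.ef12gh34'
#     make_user_id(["alice@example.com", 42])   # list entries are joined with '.'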
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for DRAC periodic tasks
"""
import mock
from ironic.common import driver_factory
from ironic.conductor import task_manager
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import raid as drac_raid
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.drivers.modules.drac import utils as test_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_drac_info()
class DracPeriodicTaskTestCase(db_base.DbTestCase):
def setUp(self):
super(DracPeriodicTaskTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_drac')
self.node = obj_utils.create_test_node(self.context,
driver='fake_drac',
driver_info=INFO_DICT)
self.driver = driver_factory.get_driver("fake_drac")
self.job = {
'id': 'JID_001436912645',
'name': 'ConfigBIOS:BIOS.Setup.1-1',
'start_time': '00000101000000',
'until_time': 'TIME_NA',
'message': 'Job in progress',
'state': 'Running',
'percent_complete': 34}
self.virtual_disk = {
'id': 'Disk.Virtual.0:RAID.Integrated.1-1',
'name': 'disk 0',
'description': 'Virtual Disk 0 on Integrated RAID Controller 1',
'controller': 'RAID.Integrated.1-1',
'raid_level': '1',
'size_mb': 571776,
'state': 'ok',
'raid_state': 'online',
'span_depth': 1,
'span_length': 2,
'pending_operations': None
}
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test__query_raid_config_job_status(self, mock_acquire):
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock manager
mock_manager = mock.Mock()
node_list = [(self.node.uuid, 'pxe_drac',
{'raid_config_job_ids': ['42']})]
mock_manager.iter_nodes.return_value = node_list
# mock task_manager.acquire
task = mock.Mock(node=self.node,
driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
# mock _check_node_raid_jobs
self.driver.raid._check_node_raid_jobs = mock.Mock()
self.driver.raid._query_raid_config_job_status(mock_manager,
self.context)
self.driver.raid._check_node_raid_jobs.assert_called_once_with(task)
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test__query_raid_config_job_status_no_config_jobs(self, mock_acquire):
# mock manager
mock_manager = mock.Mock()
node_list = [(self.node.uuid, 'pxe_drac', {})]
mock_manager.iter_nodes.return_value = node_list
# mock task_manager.acquire
task = mock.Mock(node=self.node,
driver=self.driver)
mock_acquire.return_value = mock.MagicMock(
__enter__=mock.MagicMock(return_value=task))
# mock _check_node_raid_jobs
self.driver.raid._check_node_raid_jobs = mock.Mock()
self.driver.raid._query_raid_config_job_status(mock_manager, None)
self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count)
def test__query_raid_config_job_status_no_nodes(self):
# mock manager
mock_manager = mock.Mock()
node_list = []
mock_manager.iter_nodes.return_value = node_list
# mock _check_node_raid_jobs
self.driver.raid._check_node_raid_jobs = mock.Mock()
self.driver.raid._query_raid_config_job_status(mock_manager, None)
self.assertEqual(0, self.driver.raid._check_node_raid_jobs.call_count)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
def test__check_node_raid_jobs_without_update(self, mock_get_drac_client):
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node)
# mock dracclient.get_job
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
values=self.job)
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_called_once_with('42')
self.assertEqual(0, mock_client.list_virtual_disks.call_count)
self.node.refresh()
self.assertEqual(['42'],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertEqual({}, self.node.raid_config)
self.assertEqual(False, self.node.maintenance)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
spec_set=True, autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
def test__check_node_raid_jobs_with_completed_job(
self, mock_notify_conductor_resume_clean,
mock_get_logical_disks, mock_get_drac_client):
expected_logical_disk = {'size_gb': 558,
'raid_level': '1',
'name': 'disk 0'}
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node, context=self.context)
# mock dracclient.get_job
self.job['state'] = 'Completed'
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
values=self.job)
# mock driver.raid.get_logical_disks
mock_get_logical_disks.return_value = {
'logical_disks': [expected_logical_disk]
}
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_called_once_with('42')
self.node.refresh()
self.assertEqual([],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertEqual([expected_logical_disk],
self.node.raid_config['logical_disks'])
mock_notify_conductor_resume_clean.assert_called_once_with(task)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
def test__check_node_raid_jobs_with_failed_job(self, mock_get_drac_client):
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node, context=self.context)
# mock dracclient.get_job
self.job['state'] = 'Failed'
self.job['message'] = 'boom'
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
values=self.job)
# mock dracclient.list_virtual_disks
mock_client.list_virtual_disks.return_value = [
test_utils.dict_to_namedtuple(values=self.virtual_disk)]
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_called_once_with('42')
self.assertEqual(0, mock_client.list_virtual_disks.call_count)
self.node.refresh()
self.assertEqual([],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertEqual({}, self.node.raid_config)
task.process_event.assert_called_once_with('fail')
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
spec_set=True, autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
def test__check_node_raid_jobs_with_completed_job_already_failed(
self, mock_notify_conductor_resume_clean,
mock_get_logical_disks, mock_get_drac_client):
expected_logical_disk = {'size_gb': 558,
'raid_level': '1',
'name': 'disk 0'}
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42'],
'raid_config_job_failure': True}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node, context=self.context)
# mock dracclient.get_job
self.job['state'] = 'Completed'
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
values=self.job)
# mock driver.raid.get_logical_disks
mock_get_logical_disks.return_value = {
'logical_disks': [expected_logical_disk]
}
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_called_once_with('42')
self.node.refresh()
self.assertEqual([],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertNotIn('raid_config_job_failure',
self.node.driver_internal_info)
self.assertNotIn('logical_disks', self.node.raid_config)
task.process_event.assert_called_once_with('fail')
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
spec_set=True, autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
def test__check_node_raid_jobs_with_multiple_jobs_completed(
self, mock_notify_conductor_resume_clean,
mock_get_logical_disks, mock_get_drac_client):
expected_logical_disk = {'size_gb': 558,
'raid_level': '1',
'name': 'disk 0'}
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42', '36']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node, context=self.context)
# mock dracclient.get_job
self.job['state'] = 'Completed'
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.return_value = test_utils.dict_to_namedtuple(
values=self.job)
# mock driver.raid.get_logical_disks
mock_get_logical_disks.return_value = {
'logical_disks': [expected_logical_disk]
}
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_has_calls([mock.call('42'),
mock.call('36')])
self.node.refresh()
self.assertEqual([],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertNotIn('raid_config_job_failure',
self.node.driver_internal_info)
self.assertEqual([expected_logical_disk],
self.node.raid_config['logical_disks'])
mock_notify_conductor_resume_clean.assert_called_once_with(task)
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid.DracRAID, 'get_logical_disks',
spec_set=True, autospec=True)
@mock.patch.object(agent_base_vendor, '_notify_conductor_resume_clean')
def test__check_node_raid_jobs_with_multiple_jobs_failed(
self, mock_notify_conductor_resume_clean,
mock_get_logical_disks, mock_get_drac_client):
expected_logical_disk = {'size_gb': 558,
'raid_level': '1',
'name': 'disk 0'}
# mock node.driver_internal_info
driver_internal_info = {'raid_config_job_ids': ['42', '36']}
self.node.driver_internal_info = driver_internal_info
self.node.save()
# mock task
task = mock.Mock(node=self.node, context=self.context)
# mock dracclient.get_job
self.job['state'] = 'Completed'
failed_job = self.job.copy()
failed_job['state'] = 'Failed'
failed_job['message'] = 'boom'
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
mock_client.get_job.side_effect = [
test_utils.dict_to_namedtuple(values=failed_job),
test_utils.dict_to_namedtuple(values=self.job)]
# mock driver.raid.get_logical_disks
mock_get_logical_disks.return_value = {
'logical_disks': [expected_logical_disk]
}
self.driver.raid._check_node_raid_jobs(task)
mock_client.get_job.assert_has_calls([mock.call('42'),
mock.call('36')])
self.node.refresh()
self.assertEqual([],
self.node.driver_internal_info['raid_config_job_ids'])
self.assertNotIn('raid_config_job_failure',
self.node.driver_internal_info)
self.assertNotIn('logical_disks', self.node.raid_config)
task.process_event.assert_called_once_with('fail')
|
|
"""
Functions for applying functions that act on arrays to xarray's labeled data.
"""
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion
import functools
import itertools
import operator
from collections import Counter
import numpy as np
from . import duck_array_ops, utils, dtypes
from .alignment import deep_align
from .merge import expand_and_merge_variables
from .pycompat import OrderedDict, dask_array_type, basestring
from .utils import is_dict_like
_DEFAULT_FROZEN_SET = frozenset()
_NO_FILL_VALUE = utils.ReprObject('<no-fill-value>')
_DEFAULT_NAME = utils.ReprObject('<default-name>')
_JOINS_WITHOUT_FILL_VALUES = frozenset({'inner', 'exact'})
class _UFuncSignature(object):
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = (self.all_input_core_dims |
self.all_output_core_dims)
return self._all_core_dims
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (self.input_core_dims == other.input_core_dims and
self.output_core_dims == other.output_core_dims)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('%s(%r, %r)'
% (type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims)))
def __str__(self):
lhs = ','.join('({})'.format(','.join(dims))
for dims in self.input_core_dims)
rhs = ','.join('({})'.format(','.join(dims))
for dims in self.output_core_dims)
return '{}->{}'.format(lhs, rhs)
def to_gufunc_string(self):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
"""
all_dims = self.all_core_dims
dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims]
output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
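# Quick illustration (a sketch added for clarity, not in the original file) of
# how the core dimensions signature renders:
#
#     sig = _UFuncSignature([['x'], ['x']], [()])
#     str(sig)                  # '(x),(x)->()'
#     sig.to_gufunc_string()    # '(dim0),(dim0)->()', safe for np.vectorize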
def result_name(objects):
# type: List[object] -> Any
# use the same naming heuristics as pandas:
# https://github.com/blaze/blaze/issues/458#issuecomment-51936356
names = {getattr(obj, 'name', _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
name, = names
else:
name = None
return name
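# Small illustration (not from the original file): if every named input shares
# the name 'a', result_name returns 'a'; if names differ, or one object is
# unnamed (name=None), the set has more than one element and None is returned.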
def _get_coord_variables(args):
input_coords = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coord_vars = getattr(coords, 'variables', coords)
input_coords.append(coord_vars)
return input_coords
def build_output_coords(
args, # type: list
signature, # type: _UFuncSignature
exclude_dims=frozenset(), # type: set
):
"""Build output coordinates for an operation.
Parameters
----------
args : list
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
    signature : _UFuncSignature
Core dimensions signature for the operation.
exclude_dims : optional set
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
Returns
-------
OrderedDict of Variable objects with merged coordinates.
"""
# type: (...) -> List[OrderedDict[Any, Variable]]
input_coords = _get_coord_variables(args)
if exclude_dims:
input_coords = [OrderedDict((k, v) for k, v in coord_vars.items()
if exclude_dims.isdisjoint(v.dims))
for coord_vars in input_coords]
if len(input_coords) == 1:
# we can skip the expensive merge
unpacked_input_coords, = input_coords
merged = OrderedDict(unpacked_input_coords)
else:
merged = expand_and_merge_variables(input_coords)
output_coords = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered = OrderedDict((k, v) for k, v in merged.items()
if dropped_dims.isdisjoint(v.dims))
else:
filtered = merged
output_coords.append(filtered)
return output_coords
def apply_dataarray_ufunc(func, *args, **kwargs):
"""apply_dataarray_ufunc(func, *args, signature, join='inner',
exclude_dims=frozenset())
"""
from .dataarray import DataArray
signature = kwargs.pop('signature')
join = kwargs.pop('join', 'inner')
exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
if kwargs:
raise TypeError('apply_dataarray_ufunc() got unexpected keyword '
'arguments: %s' % list(kwargs))
if len(args) > 1:
args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
raise_on_invalid=False)
name = result_name(args)
result_coords = build_output_coords(args, signature, exclude_dims)
data_vars = [getattr(a, 'variable', a) for a in args]
result_var = func(*data_vars)
if signature.num_outputs > 1:
out = tuple(DataArray(variable, coords, name=name, fastpath=True)
for variable, coords in zip(result_var, result_coords))
else:
coords, = result_coords
out = DataArray(result_var, coords, name=name, fastpath=True)
return out
def ordered_set_union(all_keys):
# type: List[Iterable] -> Iterable
result_dict = OrderedDict()
for keys in all_keys:
for key in keys:
result_dict[key] = None
return result_dict.keys()
def ordered_set_intersection(all_keys):
# type: List[Iterable] -> Iterable
intersection = set(all_keys[0])
for keys in all_keys[1:]:
intersection.intersection_update(keys)
return [key for key in all_keys[0] if key in intersection]
def assert_and_return_exact_match(all_keys):
first_keys = all_keys[0]
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
'exact match required for all data variable names, '
'but %r != %r' % (keys, first_keys))
return first_keys
_JOINERS = {
'inner': ordered_set_intersection,
'outer': ordered_set_union,
'left': operator.itemgetter(0),
'right': operator.itemgetter(-1),
'exact': assert_and_return_exact_match,
}
def join_dict_keys(objects, how='inner'):
# type: (Iterable[Union[Mapping, Any]], str) -> Iterable
joiner = _JOINERS[how]
all_keys = [obj.keys() for obj in objects if hasattr(obj, 'keys')]
return joiner(all_keys)
def collect_dict_values(objects, keys, fill_value=None):
# type: (Iterable[Union[Mapping, Any]], Iterable, Any) -> List[list]
return [[obj.get(key, fill_value)
if is_dict_like(obj)
else obj
for obj in objects]
for key in keys]
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars, # type: Mapping[Any, Tuple[Variable]]
num_outputs, # type: int
):
# type: (...) -> Tuple[Dict[Any, Variable]]
out = tuple(OrderedDict() for _ in range(num_outputs))
for name, values in result_vars.items():
for value, results_dict in zip(values, out):
results_dict[name] = value
return out
def apply_dict_of_variables_ufunc(func, *args, **kwargs):
"""apply_dict_of_variables_ufunc(func, *args, signature, join='inner',
fill_value=None):
"""
signature = kwargs.pop('signature')
join = kwargs.pop('join', 'inner')
fill_value = kwargs.pop('fill_value', None)
if kwargs:
raise TypeError('apply_dict_of_variables_ufunc() got unexpected '
'keyword arguments: %s' % list(kwargs))
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = OrderedDict()
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars
def _fast_dataset(variables, coord_variables):
# type: (OrderedDict[Any, Variable], Mapping[Any, Variable]) -> Dataset
"""Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._from_vars_and_coord_names(variables, coord_names)
def apply_dataset_ufunc(func, *args, **kwargs):
"""apply_dataset_ufunc(func, *args, signature, join='inner',
dataset_join='inner', fill_value=None,
exclude_dims=frozenset(), keep_attrs=False):
    If dataset_join is not 'inner' or 'exact', a non-default fill_value must
    be supplied by the user. Otherwise a TypeError is raised.
"""
from .dataset import Dataset
signature = kwargs.pop('signature')
join = kwargs.pop('join', 'inner')
dataset_join = kwargs.pop('dataset_join', 'inner')
fill_value = kwargs.pop('fill_value', None)
exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
keep_attrs = kwargs.pop('keep_attrs', False)
first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True
if (dataset_join not in _JOINS_WITHOUT_FILL_VALUES and
fill_value is _NO_FILL_VALUE):
raise TypeError('to apply an operation to datasets with different '
'data variables with apply_ufunc, you must supply the '
'dataset_fill_value argument.')
if kwargs:
raise TypeError('apply_dataset_ufunc() got unexpected keyword '
'arguments: %s' % list(kwargs))
if len(args) > 1:
args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
raise_on_invalid=False)
list_of_coords = build_output_coords(args, signature, exclude_dims)
args = [getattr(arg, 'data_vars', arg) for arg in args]
result_vars = apply_dict_of_variables_ufunc(
func, *args, signature=signature, join=dataset_join,
fill_value=fill_value)
if signature.num_outputs > 1:
out = tuple(_fast_dataset(*args)
for args in zip(result_vars, list_of_coords))
else:
coord_vars, = list_of_coords
out = _fast_dataset(result_vars, coord_vars)
if keep_attrs and isinstance(first_obj, Dataset):
if isinstance(out, tuple):
out = tuple(ds._copy_attrs_from(first_obj) for ds in out)
else:
out._copy_attrs_from(first_obj)
return out
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim: value})
except (KeyError, IndexError):
if dummy is None:
dummy = _dummy_copy(obj)
obj_sel = dummy
yield obj_sel
def apply_groupby_ufunc(func, *args):
from .groupby import GroupBy, peek_at
from .variable import Variable
groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
assert groupbys, 'must have at least one groupby to iterate over'
first_groupby = groupbys[0]
if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
raise ValueError('apply_ufunc can only perform operations over '
                         'multiple GroupBy objects at once if they are all '
'grouped the same way')
grouped_dim = first_groupby._group.name
unique_values = first_groupby._unique_coord.values
iterators = []
for arg in args:
if isinstance(arg, GroupBy):
iterator = (value for _, value in arg)
elif hasattr(arg, 'dims') and grouped_dim in arg.dims:
if isinstance(arg, Variable):
raise ValueError(
'groupby operations cannot be performed with '
'xarray.Variable objects that share a dimension with '
'the grouped dimension')
iterator = _iter_over_selections(arg, grouped_dim, unique_values)
else:
iterator = itertools.repeat(arg)
iterators.append(iterator)
applied = (func(*zipped_args) for zipped_args in zip(*iterators))
applied_example, applied = peek_at(applied)
combine = first_groupby._combine
if isinstance(applied_example, tuple):
combined = tuple(combine(output) for output in zip(*applied))
else:
combined = combine(applied)
return combined
def unified_dim_sizes(variables, exclude_dims=frozenset()):
# type: Iterable[Variable] -> OrderedDict[Any, int]
dim_sizes = OrderedDict()
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError('broadcasting cannot handle duplicate '
'dimensions on a variable: %r' % list(var.dims))
for dim, size in zip(var.dims, var.shape):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError('operands cannot be broadcast together '
'with mismatched lengths for dimension '
'%r: %s vs %s'
% (dim, dim_sizes[dim], size))
return dim_sizes
SLICE_NONE = slice(None)
# A = TypeVar('A', numpy.ndarray, dask.array.Array)
def broadcast_compat_data(variable, broadcast_dims, core_dims):
# type: (Variable[A], tuple, tuple) -> A
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
'operand to apply_ufunc has required core dimensions %r, but '
'some of these are missing on the input variable: %r'
% (list(core_dims), missing_core_dims))
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError('operand to apply_ufunc encountered unexpected '
'dimensions %r on an input variable: these are core '
'dimensions on other input or output variables'
% unexpected_dims)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
key_parts = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
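# Illustrative sketch (not part of the original module): broadcast_compat_data
# reorders a Variable's data so broadcast dims come first and core dims last,
# inserting size-1 axes only where implicit broadcasting cannot supply them.
# For a Variable v with dims ('time', 'x'):
#
#     broadcast_compat_data(v, broadcast_dims=('x', 'y'), core_dims=('time',))
#
# transposes v.data to ('x', 'time') and inserts a new axis for 'y', yielding
# an array of shape (x_size, 1, time_size); a missing *leading* broadcast dim
# is simply left to NumPy's implicit broadcasting.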
def apply_variable_ufunc(func, *args, **kwargs):
"""apply_variable_ufunc(func, *args, signature, exclude_dims=frozenset())
"""
from .variable import Variable
signature = kwargs.pop('signature')
exclude_dims = kwargs.pop('exclude_dims', _DEFAULT_FROZEN_SET)
dask = kwargs.pop('dask', 'forbidden')
output_dtypes = kwargs.pop('output_dtypes', None)
output_sizes = kwargs.pop('output_sizes', None)
keep_attrs = kwargs.pop('keep_attrs', False)
if kwargs:
raise TypeError('apply_variable_ufunc() got unexpected keyword '
'arguments: %s' % list(kwargs))
dim_sizes = unified_dim_sizes((a for a in args if hasattr(a, 'dims')),
exclude_dims=exclude_dims)
broadcast_dims = tuple(dim for dim in dim_sizes
if dim not in signature.all_core_dims)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)]
if any(isinstance(array, dask_array_type) for array in input_data):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a dask array on an '
'argument, but handling for dask arrays has not '
'been enabled. Either set the ``dask`` argument '
'or load your data into memory first with '
'``.load()`` or ``.compute()``')
elif dask == 'parallelized':
input_dims = [broadcast_dims + dims
for dims in signature.input_core_dims]
numpy_func = func
def func(*arrays):
return _apply_with_dask_atop(
numpy_func, arrays, input_dims, output_dims,
signature, output_dtypes, output_sizes)
elif dask == 'allowed':
pass
else:
raise ValueError('unknown setting for dask array handling in '
'apply_ufunc: {}'.format(dask))
result_data = func(*input_data)
if signature.num_outputs > 1:
output = []
for dims, data in zip(output_dims, result_data):
var = Variable(dims, data)
if keep_attrs and isinstance(args[0], Variable):
var.attrs.update(args[0].attrs)
output.append(var)
return tuple(output)
else:
dims, = output_dims
var = Variable(dims, result_data)
if keep_attrs and isinstance(args[0], Variable):
var.attrs.update(args[0].attrs)
return var
def _apply_with_dask_atop(func, args, input_dims, output_dims, signature,
output_dtypes, output_sizes=None):
import dask.array as da
if signature.num_outputs > 1:
raise NotImplementedError('multiple outputs from apply_ufunc not yet '
"supported with dask='parallelized'")
if output_dtypes is None:
raise ValueError('output dtypes (output_dtypes) must be supplied to '
"apply_func when using dask='parallelized'")
if not isinstance(output_dtypes, list):
raise TypeError('output_dtypes must be a list of objects coercible to '
'numpy dtypes, got {}'.format(output_dtypes))
if len(output_dtypes) != signature.num_outputs:
raise ValueError('apply_ufunc arguments output_dtypes and '
'output_core_dims must have the same length: {} vs {}'
.format(len(output_dtypes), signature.num_outputs))
(dtype,) = output_dtypes
if output_sizes is None:
output_sizes = {}
new_dims = signature.all_output_core_dims - signature.all_input_core_dims
if any(dim not in output_sizes for dim in new_dims):
raise ValueError("when using dask='parallelized' with apply_ufunc, "
'output core dimensions not found on inputs must '
'have explicitly set sizes with ``output_sizes``: {}'
.format(new_dims))
for n, (data, core_dims) in enumerate(
zip(args, signature.input_core_dims)):
if isinstance(data, dask_array_type):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
'dimension {!r} on {}th function argument to '
"apply_ufunc with dask='parallelized' consists of "
'multiple chunks, but is also a core dimension. To '
'fix, rechunk into a single dask array chunk along '
'this dimension, i.e., ``.rechunk({})``, but beware '
'that this may significantly increase memory usage.'
.format(dim, n, {dim: -1}))
(out_ind,) = output_dims
atop_args = []
for arg, dims in zip(args, input_dims):
# skip leading dimensions that are implicitly added by broadcasting
ndim = getattr(arg, 'ndim', 0)
trimmed_dims = dims[-ndim:] if ndim else ()
atop_args.extend([arg, trimmed_dims])
return da.atop(func, out_ind, *atop_args, dtype=dtype, concatenate=True,
new_axes=output_sizes)
def apply_array_ufunc(func, *args, **kwargs):
"""apply_array_ufunc(func, *args, dask='forbidden')
"""
dask = kwargs.pop('dask', 'forbidden')
if kwargs:
raise TypeError('apply_array_ufunc() got unexpected keyword '
'arguments: %s' % list(kwargs))
if any(isinstance(arg, dask_array_type) for arg in args):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a dask array on an '
'argument, but handling for dask arrays has not '
'been enabled. Either set the ``dask`` argument '
'or load your data into memory first with '
'``.load()`` or ``.compute()``')
elif dask == 'parallelized':
raise ValueError("cannot use dask='parallelized' for apply_ufunc "
'unless at least one input is an xarray object')
elif dask == 'allowed':
pass
else:
raise ValueError('unknown setting for dask array handling: {}'
.format(dask))
return func(*args)
def apply_ufunc(func, *args, **kwargs):
"""apply_ufunc(func : Callable,
*args : Any,
input_core_dims : Optional[Sequence[Sequence]] = None,
output_core_dims : Optional[Sequence[Sequence]] = ((),),
exclude_dims : Collection = frozenset(),
vectorize : bool = False,
join : str = 'exact',
dataset_join : str = 'exact',
dataset_fill_value : Any = _NO_FILL_VALUE,
keep_attrs : bool = False,
kwargs : Mapping = None,
dask : str = 'forbidden',
output_dtypes : Optional[Sequence] = None,
output_sizes : Optional[Mapping[Any, int]] = None)
Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs: boolean, Optional
Whether to copy attributes from the first argument to the output.
kwargs: dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask: 'forbidden', 'allowed' or 'parallelized', optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
For illustrative purposes only, here are examples of how you could use
``apply_ufunc`` to write functions to (very nearly) replicate existing
xarray functionality:
Calculate the vector magnitude of two arguments::
def magnitude(a, b):
func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
            return xr.apply_ufunc(func, a, b)
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
    dimensions, you can use ``vectorize=True`` to turn it into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
    works well with numba's vectorize and guvectorize. Further explanation and
    examples are provided in the xarray documentation [3].
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
""" # noqa: E501 # don't error on that URL one line up
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
input_core_dims = kwargs.pop('input_core_dims', None)
output_core_dims = kwargs.pop('output_core_dims', ((),))
vectorize = kwargs.pop('vectorize', False)
join = kwargs.pop('join', 'exact')
dataset_join = kwargs.pop('dataset_join', 'exact')
keep_attrs = kwargs.pop('keep_attrs', False)
exclude_dims = kwargs.pop('exclude_dims', frozenset())
dataset_fill_value = kwargs.pop('dataset_fill_value', _NO_FILL_VALUE)
kwargs_ = kwargs.pop('kwargs', None)
dask = kwargs.pop('dask', 'forbidden')
output_dtypes = kwargs.pop('output_dtypes', None)
output_sizes = kwargs.pop('output_sizes', None)
if kwargs:
raise TypeError('apply_ufunc() got unexpected keyword arguments: %s'
% list(kwargs))
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims and not exclude_dims <= signature.all_core_dims:
raise ValueError('each dimension in `exclude_dims` must also be a '
'core dimension in the function signature')
if kwargs_:
func = functools.partial(func, **kwargs_)
if vectorize:
if signature.all_core_dims:
# we need the signature argument
if LooseVersion(np.__version__) < '1.12': # pragma: no cover
raise NotImplementedError(
'numpy 1.12 or newer required when using vectorize=True '
'in xarray.apply_ufunc with non-scalar output core '
'dimensions.')
func = np.vectorize(func,
otypes=output_dtypes,
signature=signature.to_gufunc_string(),
excluded=set(kwargs))
else:
func = np.vectorize(func,
otypes=output_dtypes,
excluded=set(kwargs))
variables_ufunc = functools.partial(apply_variable_ufunc, func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
output_dtypes=output_dtypes,
output_sizes=output_sizes)
if any(isinstance(a, GroupBy) for a in args):
# kwargs has already been added into func
this_apply = functools.partial(apply_ufunc, func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask)
return apply_groupby_ufunc(this_apply, *args)
elif any(is_dict_like(a) for a in args):
return apply_dataset_ufunc(variables_ufunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
fill_value=dataset_fill_value,
dataset_join=dataset_join,
keep_attrs=keep_attrs)
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_ufunc(variables_ufunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims)
elif any(isinstance(a, Variable) for a in args):
return variables_ufunc(*args)
else:
return apply_array_ufunc(func, *args, dask=dask)
def dot(*arrays, **kwargs):
""" dot(*arrays, dims=None)
Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: str or tuple of strings, optional
Which dimensions to sum over.
        If not specified, then all the common dimensions are summed over.
**kwargs: dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
dot: DataArray
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 4).reshape(3, 4), dims=['a', 'b'])
>>> da_b = xr.DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5),
    ...                     dims=['a', 'b', 'c'])
>>> da_c = xr.DataArray(np.arange(5 * 6).reshape(5, 6), dims=['c', 'd'])
>>>
>>> xr.dot(da_a, da_b, dims=['a', 'b']).dims
('c', )
>>> xr.dot(da_a, da_b, dims=['a']).dims
('b', 'c')
>>> xr.dot(da_a, da_b, da_c, dims=['b', 'c']).dims
('a', 'd')
"""
from .dataarray import DataArray
from .variable import Variable
dims = kwargs.pop('dims', None)
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
        raise TypeError('Only xr.DataArray and xr.Variable are supported. '
'Given {}.'.format([type(arr) for arr in arrays]))
if len(arrays) == 0:
raise TypeError('At least one array should be given.')
if isinstance(dims, basestring):
dims = (dims, )
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = 'abcdefghijklmnopqrstuvwxyz'
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is None:
        # find dimensions that occur more than once
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims
if d in common_dims and d not in dims)
input_core_dims = [[d for d in arr.dims if d not in broadcast_dims]
for arr in arrays]
output_core_dims = [tuple(d for d in all_dims if d not in
dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = ['...' + ''.join([dim_map[d] for d in ds]) for ds
in input_core_dims]
subscripts = ','.join(subscripts_list)
subscripts += '->...' + ''.join([dim_map[d] for d in output_core_dims[0]])
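    # Illustrative note: for the docstring example above, da_a has dims
    # ('a', 'b') and da_b has dims ('a', 'b', 'c'); with dims=['a', 'b'] the
    # constructed string is '...ab,...abc->...c', so np.einsum contracts over
    # 'a' and 'b' and keeps 'c'.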
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(func, *arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
dask='allowed')
return result.transpose(*[d for d in all_dims if d in result.dims])
def where(cond, x, y):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
When True, return values from `x`, otherwise returns values from `y`.
x, y : scalar, array, Variable, DataArray or Dataset
Values from which to choose. All dimension coordinates on these objects
must be aligned with each other and with `cond`.
Returns
-------
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> cond = xr.DataArray([True, False], dims=['x'])
>>> x = xr.DataArray([1, 2], dims=['y'])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)>
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where : equivalent methods
"""
# alignment for three arguments is complicated, so don't support it yet
return apply_ufunc(duck_array_ops.where,
cond, x, y,
join='exact',
dataset_join='exact',
dask='allowed')
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import os
import numpy as np
from collections import defaultdict, OrderedDict
from neon.layers import LookupTable, RecurrentSum, RecurrentLast, Linear, Bias, GeneralizedCost
from neon.initializers import Gaussian, Constant
from neon.data import ArrayIterator
from neon.data.text_preprocessing import clean_string
from neon.optimizers import GradientDescentMomentum
from neon.transforms import SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.models import Model
from neon.util.persist import load_obj, save_obj
from neon import logger as neon_logger
class SentenceVector(object):
"""
    A container class for sentence vectors that makes it easy to query
    for similar sentences.
"""
def __init__(self, vectors, text):
"""
        Initialize a SentenceVector object
Arguments:
vectors (ndarray, (#sentences, vector dimension)): sentence vectors
text (list, #sentences): sentence texts
"""
self.vectors = vectors
self.text = text
if isinstance(self.text, list):
assert self.vectors.shape[0] == len(self.text)
elif isinstance(self.text, np.ndarray):
assert self.vectors.shape[0] == self.text.shape[0]
norms = np.linalg.norm(self.vectors, axis=1)
self.vectors = self.vectors / norms.reshape(-1, 1)
def find_similar_idx(self, query, n=10):
"""
        Find similar sentences by cosine similarity between vectors:
        metric = dot(normalized_sentence_vectors, normalized_query_vector)
        Uses the precomputed, row-normalized sentence vectors.
        Arguments:
            query (ndarray): query sentence vector
            n (int): number of nearest neighbors to return
        Returns:
            indices of the most similar sentences (into self.vectors/self.text)
            cosine similarities of those sentences to the query
"""
query = query / np.linalg.norm(query)
metrics = np.dot(self.vectors, query.T)
best = np.argsort(metrics.ravel())[::-1][:n]
best_metrics = metrics[best]
return best, best_metrics
def find_similar(self, query, n=10):
"""
        Find similar sentences by cosine similarity between vectors:
        metric = dot(normalized_sentence_vectors, normalized_query_vector)
        Uses the precomputed, row-normalized sentence vectors.
        Arguments:
            query (ndarray): query sentence vector
            n (int): number of nearest neighbors to return
        Returns:
            texts of the most similar sentences
            cosine similarities of those sentences to the query
"""
query = query / np.linalg.norm(query)
metrics = np.dot(self.vectors, query.T)
best = np.argsort(metrics.ravel())[::-1][:n]
best_metrics = metrics[best]
nearest = [self.text[b] for b in best.tolist()]
return nearest, best_metrics
def find_similar_with_idx(self, idx, n=10):
if isinstance(idx, list):
best = []
for i in idx:
best += self.find_similar_with_idx(i, n)
return best
else:
query = self.vectors[idx]
metrics = np.dot(self.vectors, query.T)
best = np.argsort(metrics.ravel())[::-1][:n]
return best.tolist()
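# A minimal usage sketch of SentenceVector (hypothetical helper, not part of
# the original module); the random vectors and toy sentences below exist only
# to illustrate the query flow.
def _example_sentence_vector_query():
    """Build a tiny SentenceVector index and query it with one of its rows."""
    vectors = np.random.rand(4, 8).astype('float32')  # 4 sentences, 8-dim vectors
    text = ['a b c', 'b c d', 'c d e', 'd e f']
    sv = SentenceVector(vectors, text)
    # Query with the first sentence's vector; the top hit should be itself.
    nearest, scores = sv.find_similar(vectors[0], n=2)
    return nearest, scores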
def prep_data(raw_input, input_type, max_len, vocab, dtype='int32',
index_from=2, oov=1):
"""
Transforms the raw received input data to put it in the required
format for running through neon.
Args:
        raw_input (blob): input data contents, e.g. a stream of text
input_type (str): type for input data file
max_len (int): max sentence length to deal with
vocab (dict): vocabulary file
        dtype (type, optional): type for each element of a tensor. Defaults to
            int32
    Returns:
        numpy array of shape (max_len, 1) containing the token indices.
"""
dtype = np.dtype(dtype)
if input_type == "text":
in_shape = (max_len, 1)
tokens = tokenize(raw_input)
sent_inp = np.array(
[oov if t not in vocab else (vocab[t] + index_from) for t in tokens])
l = min(len(sent_inp), max_len)
xbuf = np.zeros(in_shape, dtype=dtype)
xbuf[-l:] = sent_inp[-l:].reshape(-1, 1)
return xbuf
else:
raise ValueError("Unsupported data type: %s" % input_type)
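# Hedged usage sketch for prep_data (the toy vocabulary and sentence are made
# up for illustration and are not part of the original module).
def _example_prep_data():
    """Turn a raw sentence into a padded (max_len, 1) column of token indices."""
    vocab = {'hello': 0, 'world': 1}  # toy vocabulary
    xbuf = prep_data('Hello world!', 'text', max_len=5, vocab=vocab)
    # Known tokens are shifted by index_from (2), unknown tokens map to the
    # oov index (1), and the sequence is right-aligned with zero padding.
    return xbuf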
def tokenize(s, eos=True):
s = clean_string(s)
if eos and len(s) > 0:
return (s + ' <eos>').strip().split()
else:
return s.strip().split()
def get_google_word2vec_W(fname, vocab, index_from=2):
"""
    Extract the embedding matrix from the given word2vec binary file and use it
    to initialize a new embedding matrix for words found in vocab.
    Conventions are to save indices for pad, oov, etc.:
        index 0: pad
        index 1: oov (or <unk>)
    In most cases the <eos> token is already present in the preprocessed data,
    so there is no need to reserve an index for <eos>.
"""
f = open(fname, 'rb')
header = f.readline()
orig_w2v_size, embedding_dim = map(int, header.split())
binary_len = np.dtype('float32').itemsize * embedding_dim
vocab_size = len(vocab) + index_from
W = np.zeros((vocab_size, embedding_dim))
found_words_idx = defaultdict(int)
found_words = defaultdict(int)
for i in range(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == b' ':
word = (b''.join(word)).decode('utf-8')
break
if ch != b'\n':
word.append(ch)
if word in vocab:
wrd_id = vocab[word] + index_from
if wrd_id < vocab_size:
W[wrd_id] = np.fromstring(
f.read(binary_len), dtype='float32')
found_words_idx[wrd_id] += 1
found_words[word] += 1
else:
f.read(binary_len)
cnt = 0
for wrd_id in range(vocab_size):
if wrd_id not in found_words_idx:
cnt += 1
W[wrd_id] = np.random.uniform(-1.0, 1.0, embedding_dim)
unfound_words = list()
for wrd in vocab:
if wrd not in found_words:
unfound_words += [wrd]
assert cnt + len(found_words_idx) == vocab_size
f.close()
return W, embedding_dim, found_words
def compute_vocab_expansion(orig_word_vectors, w2v_W, w2v_vocab, word_idict):
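    """
    Expand the RNN vocabulary using word2vec: fit a linear regressor that maps
    word2vec vectors onto the RNN embedding space (train_regressor), then apply
    it to every word2vec word to build an expanded embedding matrix
    (apply_regressor).
    """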
neon_logger.display("Learning linear mapping from w2v -> rnn embedding...")
clf = train_regressor(orig_word_vectors, w2v_W, w2v_vocab)
neon_logger.display("Constructing map...")
init_embed = apply_regressor(clf, w2v_W, w2v_vocab, orig_word_vectors, word_idict)
return init_embed
def get_w2v_vocab(fname, max_vocab_size, cache=True):
"""
Get ordered dict of vocab from google word2vec
"""
if cache:
cache_fname = fname.split('.')[0] + ".vocab"
if os.path.isfile(cache_fname):
vocab, vocab_size = load_obj(cache_fname)
neon_logger.display("Word2Vec vocab cached, size is: {}".format(vocab_size))
return vocab, vocab_size
with open(fname, 'rb') as f:
header = f.readline()
vocab_size, embed_dim = map(int, header.split())
binary_len = np.dtype('float32').itemsize * embed_dim
neon_logger.display("Word2Vec vocab size is: {}".format(vocab_size))
vocab_size = min(max_vocab_size, vocab_size)
neon_logger.display("Reducing vocab size to: {}".format(vocab_size))
vocab = OrderedDict()
        for i in range(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == b' ':
word = (b''.join(word)).decode('utf-8')
break
if ch != b'\n':
word.append(ch)
f.read(binary_len)
vocab[word] = i
if cache:
save_obj((vocab, vocab_size), cache_fname)
return vocab, vocab_size
def get_embeddings(lookup_layer, word_idict):
"""
Extract RNN embeddings from the lookup layer of the model
Function modified from:
https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
"""
f_emb = lookup_layer.W.get()
d = OrderedDict()
for i in range(f_emb.shape[0]):
ff = f_emb[i].flatten()
d[word_idict[i]] = ff
return d
def train_regressor(orig_wordvecs, w2v_W, w2v_vocab):
"""
Return regressor to map word2vec to RNN word space
Function modified from:
https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
"""
# Gather all words from word2vec that appear in wordvecs
d = defaultdict(lambda: 0)
for w in w2v_vocab.keys():
d[w] = 1
shared = OrderedDict()
count = 0
for w in list(orig_wordvecs.keys())[:-2]:
if d[w] > 0:
shared[w] = count
count += 1
# Get the vectors for all words in 'shared'
w2v = np.zeros((len(shared), 300), dtype='float32')
sg = np.zeros((len(shared), 620), dtype='float32')
for w in shared.keys():
w2v[shared[w]] = w2v_W[w2v_vocab[w]]
sg[shared[w]] = orig_wordvecs[w]
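    # w2v holds the 300-d word2vec vectors and sg the 620-d RNN embeddings for
    # the shared words; the linear model below learns the affine map w2v -> sg.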
train_set = ArrayIterator(X=w2v, y=sg, make_onehot=False)
layers = [Linear(nout=620, init=Gaussian(loc=0.0, scale=0.1)),
Bias(init=Constant(0.0))]
clf = Model(layers=layers)
# regression model is trained using default global batch size
cost = GeneralizedCost(costfunc=SumSquared())
opt = GradientDescentMomentum(0.1, 0.9, gradient_clip_value=5.0)
callbacks = Callbacks(clf)
clf.fit(train_set, num_epochs=20, optimizer=opt, cost=cost, callbacks=callbacks)
return clf
def apply_regressor(clf, w2v_W, w2v_vocab, orig_word_vectors, word_idict):
"""
Map words from word2vec into RNN word space
    Function modified from:
https://github.com/ryankiros/skip-thoughts/blob/master/training/tools.py
"""
init_embed = clf.be.zeros((len(w2v_vocab), 620), dtype=np.float32)
word_vec = clf.be.empty((300, 1))
    # To apply the regression model for expansion we can only process one
    # word at a time, so set the actual batch size to 1.
actual_bsz = 1
clf.set_batch_size(N=actual_bsz)
for i, w in enumerate(w2v_vocab.keys()):
word_vec.set(w2v_W[w2v_vocab[w]].reshape(300, 1))
init_embed[w2v_vocab[w]][:] = clf.fprop(word_vec)[:, :actual_bsz]
word_vec_l = clf.be.empty((620, 1))
for w in word_idict.values():
if w in w2v_vocab:
word_vec_l.set(orig_word_vectors[w])
init_embed[w2v_vocab[w]][:] = word_vec_l
return init_embed.get()
def load_sent_encoder(model_dict, expand_vocab=False, orig_vocab=None,
w2v_vocab=None, w2v_path=None, use_recur_last=False):
"""
Custom function to load the model saved from skip-thought vector training
and reconstruct another model just using the LUT and encoding layer for
    transferring sentence representations.
Arguments:
model_dict: saved s2v model dict
expand_vocab: Bool to indicate if w2v vocab expansion should be attempted
orig_vocab: If using expand_vocab, original vocabulary dict is needed for expansion
w2v_vocab: If using expand_vocab, w2v vocab dict
w2v_path: Path to trained w2v binary (GoogleNews)
use_recur_last: If True a RecurrentLast layer is used as the final layer, if False
a RecurrentSum layer is used as the last layer of the returned model.
"""
embed_dim = model_dict['model']['config']['embed_dim']
model_train = Model(model_dict)
# RecurrentLast should be used for semantic similarity evaluation
if use_recur_last:
last_layer = RecurrentLast()
else:
last_layer = RecurrentSum()
if expand_vocab:
        assert orig_vocab and w2v_vocab and w2v_path, (
            "orig_vocab, w2v_vocab and w2v_path all need to be specified "
            "when using expand_vocab")
neon_logger.display("Computing vocab expansion regression...")
# Build inverse word dictionary (word -> index)
word_idict = dict()
for kk, vv in orig_vocab.items():
# Add 2 to the index to allow for padding and oov tokens as 0 and 1
word_idict[vv + 2] = kk
word_idict[0] = ''
word_idict[1] = 'UNK'
# Create dictionary of word -> vec
orig_word_vecs = get_embeddings(model_train.layers.layer_dict['lookupTable'], word_idict)
        # Load GoogleNews w2v weights
w2v_W, w2v_dim, _ = get_google_word2vec_W(w2v_path, w2v_vocab)
# Compute the expanded vocab lookup table from a linear mapping of
# words2vec into RNN word space
init_embed = compute_vocab_expansion(orig_word_vecs, w2v_W, w2v_vocab, word_idict)
init_embed_dev = model_train.be.array(init_embed)
w2v_vocab_size = len(w2v_vocab)
table = LookupTable(vocab_size=w2v_vocab_size, embedding_dim=embed_dim,
init=init_embed_dev, pad_idx=0)
model = Model(layers=[table,
model_train.layers.layer_dict['encoder'],
last_layer])
else:
model = Model(layers=[model_train.layers.layer_dict['lookupTable'],
model_train.layers.layer_dict['encoder'],
last_layer])
return model
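# Hedged usage sketch (the model file name below is hypothetical and only
# illustrates how a saved skip-thought model dict feeds load_sent_encoder).
def _example_load_encoder(model_file='s2v_model.prm'):
    """Load a saved skip-thought model dict and rebuild the sentence encoder."""
    model_dict = load_obj(model_file)        # saved s2v model dict
    encoder = load_sent_encoder(model_dict)  # LUT + encoder + RecurrentSum
    return encoder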
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import six
from six.moves import http_client
from keystone.common import extension as keystone_extension
import keystone.conf
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import rest
from keystone.tests.unit.schema import v2
CONF = keystone.conf.CONF
class CoreApiTests(object):
def assertValidError(self, error):
self.assertIsNotNone(error.get('code'))
self.assertIsNotNone(error.get('title'))
self.assertIsNotNone(error.get('message'))
def assertValidVersion(self, version):
self.assertIsNotNone(version)
self.assertIsNotNone(version.get('id'))
self.assertIsNotNone(version.get('status'))
self.assertIsNotNone(version.get('updated'))
def assertValidExtension(self, extension):
self.assertIsNotNone(extension)
self.assertIsNotNone(extension.get('name'))
self.assertIsNotNone(extension.get('namespace'))
self.assertIsNotNone(extension.get('alias'))
self.assertIsNotNone(extension.get('updated'))
def assertValidExtensionLink(self, link):
self.assertIsNotNone(link.get('rel'))
self.assertIsNotNone(link.get('type'))
self.assertIsNotNone(link.get('href'))
def assertValidTenant(self, tenant):
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
self.assertNotIn('domain_id', tenant)
self.assertNotIn('parent_id', tenant)
def assertValidUser(self, user):
self.assertIsNotNone(user.get('id'))
self.assertIsNotNone(user.get('name'))
def assertValidRole(self, tenant):
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
def test_public_not_found(self):
r = self.public_request(
path='/%s' % uuid.uuid4().hex,
expected_status=http_client.NOT_FOUND)
self.assertValidErrorResponse(r)
def test_admin_not_found(self):
r = self.admin_request(
path='/%s' % uuid.uuid4().hex,
expected_status=http_client.NOT_FOUND)
self.assertValidErrorResponse(r)
def test_public_multiple_choice(self):
r = self.public_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_admin_multiple_choice(self):
r = self.admin_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_public_version(self):
r = self.public_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_admin_version(self):
r = self.admin_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_public_extensions(self):
r = self.public_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.PUBLIC_EXTENSIONS)
def test_admin_extensions(self):
r = self.admin_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_admin_extensions_returns_not_found(self):
self.admin_request(path='/v2.0/extensions/invalid-extension',
expected_status=http_client.NOT_FOUND)
def test_public_osksadm_extension_returns_not_found(self):
self.public_request(path='/v2.0/extensions/OS-KSADM',
expected_status=http_client.NOT_FOUND)
def test_admin_osksadm_extension(self):
r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
self.assertValidExtensionResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_authenticate(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
'tenantId': self.tenant_bar['id'],
},
},
expected_status=http_client.OK)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_authenticate_unscoped(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
},
},
expected_status=http_client.OK)
self.assertValidAuthenticationResponse(r)
def test_get_tenants_for_token(self):
r = self.public_request(path='/v2.0/tenants',
token=self.get_scoped_token())
self.assertValidTenantListResponse(r)
def test_validate_token(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token)
self.assertValidAuthenticationResponse(r)
def test_invalid_token_returns_not_found(self):
token = self.get_scoped_token()
self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': 'invalid',
},
token=token,
expected_status=http_client.NOT_FOUND)
def test_validate_token_service_role(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(
tenant_id=default_fixtures.SERVICE_TENANT_ID)
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
def test_remove_role_revokes_token(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(
tenant_id=default_fixtures.SERVICE_TENANT_ID)
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
self.assignment_api.remove_role_from_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token,
expected_status=http_client.UNAUTHORIZED)
def test_validate_token_belongs_to(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
self.tenant_bar['id']))
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_no_belongs_to_still_returns_catalog(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s' % token)
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_head(self):
"""The same call as above, except using HEAD.
There's no response to validate here, but this is included for the
sake of completely covering the core API.
"""
token = self.get_scoped_token()
self.admin_request(
method='HEAD',
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token,
expected_status=http_client.OK)
def test_endpoints(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tokens/%(token_id)s/endpoints' % {
'token_id': token,
},
token=token)
self.assertValidEndpointListResponse(r)
def test_get_tenant(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants/%(tenant_id)s' % {
'tenant_id': self.tenant_bar['id'],
},
token=token)
self.assertValidTenantResponse(r)
def test_get_tenant_by_name(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants?name=%(tenant_name)s' % {
'tenant_name': self.tenant_bar['name'],
},
token=token)
self.assertValidTenantResponse(r)
def test_get_user_roles_with_tenant(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
'tenant_id': self.tenant_bar['id'],
'user_id': self.user_foo['id'],
},
token=token)
self.assertValidRoleListResponse(r)
def test_get_user_roles_without_tenant(self):
token = self.get_scoped_token()
self.admin_request(
path='/v2.0/users/%(user_id)s/roles' % {
'user_id': self.user_foo['id'],
},
token=token, expected_status=http_client.NOT_IMPLEMENTED)
def test_get_user(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
},
token=token)
self.assertValidUserResponse(r)
def test_get_user_by_name(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/users?name=%(user_name)s' % {
'user_name': self.user_foo['name'],
},
token=token)
self.assertValidUserResponse(r)
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': "False",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, 0|1 are not booleans
'enabled': 0,
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
# Test UPDATE request
path = '/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
}
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
'enabled': "False",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
# In JSON, 0|1 are not booleans
'enabled': 1,
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
def test_create_update_user_valid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
self.admin_request(method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': False,
},
},
token=token,
expected_status=http_client.OK)
def test_error_response(self):
"""Trigger assertValidErrorResponse by convention."""
self.public_request(path='/v2.0/tenants',
expected_status=http_client.UNAUTHORIZED)
def test_invalid_parameter_error_response(self):
token = self.get_scoped_token()
bad_body = {
'OS-KSADM:service%s' % uuid.uuid4().hex: {
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
},
}
res = self.admin_request(method='POST',
path='/v2.0/OS-KSADM/services',
body=bad_body,
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(res)
res = self.admin_request(method='POST',
path='/v2.0/users',
body=bad_body,
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(res)
def _get_user_id(self, r):
"""Helper method to return user ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_role_id(self, r):
"""Helper method to return a role ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_role_name(self, r):
"""Helper method to return role NAME from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def _get_project_id(self, r):
"""Helper method to return project ID from a response.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def assertNoRoles(self, r):
"""Helper method to assert No Roles.
This needs to be overridden by child classes
based on their content type.
"""
raise NotImplementedError()
def test_update_user_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Check if member_role is in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=http_client.OK)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Create a new tenant
r = self.admin_request(
method='POST',
path='/v2.0/tenants',
body={
'tenant': {
'name': 'test_update_user',
'description': 'A description ...',
'enabled': True,
},
},
token=token,
expected_status=http_client.OK)
project_id = self._get_project_id(r.result)
# Update user's tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': project_id,
},
},
token=token,
expected_status=http_client.OK)
# 'member_role' should be in new_tenant
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': project_id,
'user_id': user_id
},
token=token,
expected_status=http_client.OK)
self.assertEqual('_member_', self._get_role_name(r.result))
# 'member_role' should not be in tenant_bar any more
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=http_client.OK)
self.assertNoRoles(r.result)
def test_update_user_with_invalid_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': 'test_invalid_tenant',
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': 'abcde12345heha',
},
},
token=token,
expected_status=http_client.NOT_FOUND)
def test_update_user_with_invalid_tenant_no_prev_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': 'test_invalid_tenant',
'password': uuid.uuid4().hex,
'enabled': True,
},
},
token=token,
expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Update user with an invalid tenant
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': 'abcde12345heha',
},
},
token=token,
expected_status=http_client.NOT_FOUND)
def test_update_user_with_old_tenant(self):
token = self.get_scoped_token()
# Create a new user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'tenantId': self.tenant_bar['id'],
'enabled': True,
},
},
token=token,
expected_status=http_client.OK)
user_id = self._get_user_id(r.result)
# Check if member_role is in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=http_client.OK)
self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))
# Update user's tenant with old tenant id
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': user_id,
},
body={
'user': {
'tenantId': self.tenant_bar['id'],
},
},
token=token,
expected_status=http_client.OK)
# 'member_role' should still be in tenant_bar
r = self.admin_request(
path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
'project_id': self.tenant_bar['id'],
'user_id': user_id
},
token=token,
expected_status=http_client.OK)
self.assertEqual('_member_', self._get_role_name(r.result))
def test_authenticating_a_user_with_no_password(self):
token = self.get_scoped_token()
username = uuid.uuid4().hex
# create the user
self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': username,
'enabled': True,
},
},
token=token)
# fail to authenticate
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': username,
'password': 'password',
},
},
},
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
def test_www_authenticate_header(self):
r = self.public_request(
path='/v2.0/tenants',
expected_status=http_client.UNAUTHORIZED)
self.assertEqual('Keystone uri="http://localhost"',
r.headers.get('WWW-Authenticate'))
def test_www_authenticate_header_host(self):
test_url = 'http://%s:4187' % uuid.uuid4().hex
self.config_fixture.config(public_endpoint=test_url)
r = self.public_request(
path='/v2.0/tenants',
expected_status=http_client.UNAUTHORIZED)
self.assertEqual('Keystone uri="%s"' % test_url,
r.headers.get('WWW-Authenticate'))
class LegacyV2UsernameTests(object):
"""Test to show the broken username behavior in V2.
    The V2 API is documented to use `username` instead of `name`. In
    practice, however, the API forced clients to use `name` and let
    `username` fall into the `extra` field.
These tests ensure this behavior works so fixes to `username`/`name`
will be backward compatible.
"""
def create_user(self, **user_attrs):
"""Create a users and returns the response object.
:param user_attrs: attributes added to the request body (optional)
"""
token = self.get_scoped_token()
body = {
'user': {
'name': uuid.uuid4().hex,
'enabled': True,
},
}
body['user'].update(user_attrs)
return self.admin_request(
method='POST',
path='/v2.0/users',
token=token,
body=body,
expected_status=http_client.OK)
def test_create_with_extra_username(self):
"""The response for creating a user will contain the extra fields."""
fake_username = uuid.uuid4().hex
r = self.create_user(username=fake_username)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(fake_username, user.get('username'))
def test_get_returns_username_from_extra(self):
"""The response for getting a user will contain the extra fields."""
token = self.get_scoped_token()
fake_username = uuid.uuid4().hex
r = self.create_user(username=fake_username)
id_ = self.get_user_attribute_from_response(r, 'id')
r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(fake_username, user.get('username'))
def test_update_returns_new_username_when_adding_username(self):
"""The response for updating a user will contain the extra fields.
This is specifically testing for updating a username when a value
was not previously set.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'username': 'new_username',
'enabled': enabled,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual('new_username', user.get('username'))
def test_update_returns_new_username_when_updating_username(self):
"""The response for updating a user will contain the extra fields.
This tests updating a username that was previously set.
"""
token = self.get_scoped_token()
r = self.create_user(username='original_username')
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'username': 'new_username',
'enabled': enabled,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual('new_username', user.get('username'))
def test_username_is_always_returned_create(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
r = self.create_user()
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_get(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_get_by_name(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
name = self.get_user_attribute_from_response(r, 'name')
r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_is_always_returned_update_no_username_provided(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'enabled': enabled,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_updated_username_is_returned(self):
"""Username is set as the value of name if no username is provided.
This matches the v2.0 spec where we really should be using username
and not name.
"""
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
name = self.get_user_attribute_from_response(r, 'name')
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'name': name,
'enabled': enabled,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_can_be_used_instead_of_name_create(self):
token = self.get_scoped_token()
r = self.admin_request(
method='POST',
path='/v2.0/users',
token=token,
body={
'user': {
'username': uuid.uuid4().hex,
'enabled': True,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(user.get('name'), user.get('username'))
def test_username_can_be_used_instead_of_name_update(self):
token = self.get_scoped_token()
r = self.create_user()
id_ = self.get_user_attribute_from_response(r, 'id')
new_username = uuid.uuid4().hex
enabled = self.get_user_attribute_from_response(r, 'enabled')
r = self.admin_request(
method='PUT',
path='/v2.0/users/%s' % id_,
token=token,
body={
'user': {
'username': new_username,
'enabled': enabled,
},
},
expected_status=http_client.OK)
self.assertValidUserResponse(r)
user = self.get_user_from_response(r)
self.assertEqual(new_username, user.get('name'))
self.assertEqual(user.get('name'), user.get('username'))
class RestfulTestCase(rest.RestfulTestCase):
def setUp(self):
super(RestfulTestCase, self).setUp()
# TODO(termie): add an admin user to the fixtures and use that user
# override the fixtures, for now
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_admin['id'])
class V2TestCase(object):
def config_overrides(self):
super(V2TestCase, self).config_overrides()
self.config_fixture.config(
group='catalog',
driver='templated',
template_file=unit.dirs.tests('default_catalog.templates'))
def _get_user_id(self, r):
return r['user']['id']
def _get_role_name(self, r):
return r['roles'][0]['name']
def _get_role_id(self, r):
return r['roles'][0]['id']
def _get_project_id(self, r):
return r['tenant']['id']
def _get_token_id(self, r):
return r.result['access']['token']['id']
def assertNoRoles(self, r):
self.assertEqual([], r['roles'])
def assertValidErrorResponse(self, r):
self.assertIsNotNone(r.result.get('error'))
self.assertValidError(r.result['error'])
self.assertEqual(r.result['error']['code'], r.status_code)
def assertValidExtension(self, extension, expected):
super(V2TestCase, self).assertValidExtension(extension)
descriptions = [ext['description'] for ext in expected.values()]
description = extension.get('description')
self.assertIsNotNone(description)
self.assertIn(description, descriptions)
self.assertIsNotNone(extension.get('links'))
self.assertNotEmpty(extension.get('links'))
for link in extension.get('links'):
self.assertValidExtensionLink(link)
def assertValidExtensionListResponse(self, r, expected):
self.assertIsNotNone(r.result.get('extensions'))
self.assertIsNotNone(r.result['extensions'].get('values'))
self.assertNotEmpty(r.result['extensions'].get('values'))
for extension in r.result['extensions']['values']:
self.assertValidExtension(extension, expected)
def assertValidExtensionResponse(self, r, expected):
self.assertValidExtension(r.result.get('extension'), expected)
def assertValidUser(self, user):
super(V2TestCase, self).assertValidUser(user)
self.assertNotIn('default_project_id', user)
if 'tenantId' in user:
# NOTE(morganfainberg): tenantId should never be "None", it gets
            # filtered out of the object if it is there. This is a
            # belt-and-suspenders check to avoid unintended regressions.
self.assertIsNotNone(user.get('tenantId'))
def assertValidAuthenticationResponse(self, r,
require_service_catalog=False):
self.assertIsNotNone(r.result.get('access'))
self.assertIsNotNone(r.result['access'].get('token'))
self.assertIsNotNone(r.result['access'].get('user'))
# validate token
self.assertIsNotNone(r.result['access']['token'].get('id'))
self.assertIsNotNone(r.result['access']['token'].get('expires'))
tenant = r.result['access']['token'].get('tenant')
if tenant is not None:
# validate tenant
self.assertIsNotNone(tenant.get('id'))
self.assertIsNotNone(tenant.get('name'))
# validate user
self.assertIsNotNone(r.result['access']['user'].get('id'))
self.assertIsNotNone(r.result['access']['user'].get('name'))
if require_service_catalog:
# roles are only provided with a service catalog
roles = r.result['access']['user'].get('roles')
self.assertNotEmpty(roles)
for role in roles:
self.assertIsNotNone(role.get('name'))
serviceCatalog = r.result['access'].get('serviceCatalog')
# validate service catalog
if require_service_catalog:
self.assertIsNotNone(serviceCatalog)
if serviceCatalog is not None:
self.assertIsInstance(serviceCatalog, list)
if require_service_catalog:
self.assertNotEmpty(serviceCatalog)
for service in r.result['access']['serviceCatalog']:
# validate service
self.assertIsNotNone(service.get('name'))
self.assertIsNotNone(service.get('type'))
# services contain at least one endpoint
self.assertIsNotNone(service.get('endpoints'))
self.assertNotEmpty(service['endpoints'])
for endpoint in service['endpoints']:
# validate service endpoint
self.assertIsNotNone(endpoint.get('publicURL'))
def assertValidTenantListResponse(self, r):
self.assertIsNotNone(r.result.get('tenants'))
self.assertNotEmpty(r.result['tenants'])
for tenant in r.result['tenants']:
self.assertValidTenant(tenant)
self.assertIsNotNone(tenant.get('enabled'))
self.assertIn(tenant.get('enabled'), [True, False])
def assertValidUserResponse(self, r):
self.assertIsNotNone(r.result.get('user'))
self.assertValidUser(r.result['user'])
def assertValidTenantResponse(self, r):
self.assertIsNotNone(r.result.get('tenant'))
self.assertValidTenant(r.result['tenant'])
def assertValidRoleListResponse(self, r):
self.assertIsNotNone(r.result.get('roles'))
self.assertNotEmpty(r.result['roles'])
for role in r.result['roles']:
self.assertValidRole(role)
def assertValidVersion(self, version):
super(V2TestCase, self).assertValidVersion(version)
self.assertIsNotNone(version.get('links'))
self.assertNotEmpty(version.get('links'))
for link in version.get('links'):
self.assertIsNotNone(link.get('rel'))
self.assertIsNotNone(link.get('href'))
self.assertIsNotNone(version.get('media-types'))
self.assertNotEmpty(version.get('media-types'))
for media in version.get('media-types'):
self.assertIsNotNone(media.get('base'))
self.assertIsNotNone(media.get('type'))
def assertValidMultipleChoiceResponse(self, r):
self.assertIsNotNone(r.result.get('versions'))
self.assertIsNotNone(r.result['versions'].get('values'))
self.assertNotEmpty(r.result['versions']['values'])
for version in r.result['versions']['values']:
self.assertValidVersion(version)
def assertValidVersionResponse(self, r):
self.assertValidVersion(r.result.get('version'))
def assertValidEndpointListResponse(self, r):
self.assertIsNotNone(r.result.get('endpoints'))
self.assertNotEmpty(r.result['endpoints'])
for endpoint in r.result['endpoints']:
self.assertIsNotNone(endpoint.get('id'))
self.assertIsNotNone(endpoint.get('name'))
self.assertIsNotNone(endpoint.get('type'))
self.assertIsNotNone(endpoint.get('publicURL'))
self.assertIsNotNone(endpoint.get('internalURL'))
self.assertIsNotNone(endpoint.get('adminURL'))
def get_user_from_response(self, r):
return r.result.get('user')
def get_user_attribute_from_response(self, r, attribute_name):
return r.result['user'][attribute_name]
def test_service_crud_requires_auth(self):
"""Service CRUD should return unauthorized without an X-Auth-Token."""
# values here don't matter because it will be unauthorized before
# they're checked (bug 1006822).
service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
service_body = {
'OS-KSADM:service': {
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
},
}
r = self.admin_request(method='GET',
path='/v2.0/OS-KSADM/services',
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='POST',
path='/v2.0/OS-KSADM/services',
body=service_body,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='GET',
path=service_path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
r = self.admin_request(method='DELETE',
path=service_path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
def test_user_role_list_requires_auth(self):
"""User role list return unauthorized without an X-Auth-Token."""
# values here don't matter because it will be unauthorized before
# they're checked (bug 1006815).
path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
'tenant_id': uuid.uuid4().hex,
'user_id': uuid.uuid4().hex,
}
r = self.admin_request(path=path,
expected_status=http_client.UNAUTHORIZED)
self.assertValidErrorResponse(r)
def test_fetch_revocation_list_nonadmin_fails(self):
self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
expected_status=http_client.UNAUTHORIZED)
def test_fetch_revocation_list_admin_200(self):
token = self.get_scoped_token()
r = self.admin_request(
method='GET',
path='/v2.0/tokens/revoked',
token=token,
expected_status=http_client.OK)
self.assertValidRevocationListResponse(r)
def assertValidRevocationListResponse(self, response):
self.assertIsNotNone(response.result['signed'])
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
# Test UPDATE request
r = self.admin_request(
method='PUT',
path='/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
},
body={
'user': {
# In JSON, "true|false" are not boolean
'enabled': "true",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
def test_authenticating_a_user_with_an_OSKSADM_password(self):
token = self.get_scoped_token()
username = uuid.uuid4().hex
password = uuid.uuid4().hex
# create the user
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': username,
'OS-KSADM:password': password,
'enabled': True,
},
},
token=token)
# successfully authenticate
self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': username,
'password': password,
},
},
},
expected_status=http_client.OK)
# ensure password doesn't leak
user_id = r.result['user']['id']
r = self.admin_request(
method='GET',
path='/v2.0/users/%s' % user_id,
token=token,
expected_status=http_client.OK)
self.assertNotIn('OS-KSADM:password', r.result['user'])
def test_updating_a_user_with_an_OSKSADM_password(self):
token = self.get_scoped_token()
user_id = self.user_foo['id']
password = uuid.uuid4().hex
# update the user
self.admin_request(
method='PUT',
path='/v2.0/users/%s/OS-KSADM/password' % user_id,
body={
'user': {
'password': password,
},
},
token=token,
expected_status=http_client.OK)
# successfully authenticate
self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': password,
},
},
},
expected_status=http_client.OK)
def test_enable_or_disable_user(self):
token = self.get_scoped_token()
user_id = self.user_badguy['id']
self.assertFalse(self.user_badguy['enabled'])
def _admin_request(body, status):
resp = self.admin_request(
method='PUT',
path='/v2.0/users/%s/OS-KSADM/enabled' % user_id,
token=token,
body=body,
expected_status=status)
return resp
# Enable the user.
body = {'user': {'enabled': True}}
resp = _admin_request(body, http_client.OK)
self.assertTrue(resp.json['user']['enabled'])
# Disable the user.
body = {'user': {'enabled': False}}
resp = _admin_request(body, http_client.OK)
self.assertFalse(resp.json['user']['enabled'])
# Attributes other than `enabled` should still work due to bug 1607751
body = {
'user': {
'description': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True
}
}
_admin_request(body, http_client.OK)
# `enabled` is boolean, type other than boolean is not allowed.
body = {'user': {'enabled': uuid.uuid4().hex}}
_admin_request(body, http_client.BAD_REQUEST)
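# The concrete test cases below compose the shared pieces defined above:
# CoreApiTests and LegacyV2UsernameTests provide the request scenarios,
# V2TestCase provides the v2-specific response parsing and assertions,
# RestfulTestCase wires up the fixtures, and each concrete subclass mainly
# selects the token provider under test (uuid or fernet).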
class V2TestCaseUUID(V2TestCase, RestfulTestCase, CoreApiTests,
LegacyV2UsernameTests):
def config_overrides(self):
super(V2TestCaseUUID, self).config_overrides()
self.config_fixture.config(group='token', provider='uuid')
class V2TestCaseFernet(V2TestCase, RestfulTestCase, CoreApiTests,
LegacyV2UsernameTests):
def config_overrides(self):
super(V2TestCaseFernet, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def test_fetch_revocation_list_md5(self):
self.skipTest('Revocation lists do not support Fernet')
def test_fetch_revocation_list_sha256(self):
self.skipTest('Revocation lists do not support Fernet')
class TestFernetTokenProviderV2(RestfulTestCase):
def setUp(self):
super(TestFernetTokenProviderV2, self).setUp()
# Add catalog data
self.region = unit.new_region_ref()
self.region_id = self.region['id']
self.catalog_api.create_region(self.region)
self.service = unit.new_service_ref()
self.service_id = self.service['id']
self.catalog_api.create_service(self.service_id, self.service)
self.endpoint = unit.new_endpoint_ref(service_id=self.service_id,
interface='public',
region_id=self.region_id)
self.endpoint_id = self.endpoint['id']
self.catalog_api.create_endpoint(self.endpoint_id, self.endpoint)
def assertValidUnscopedTokenResponse(self, r):
v2.unscoped_validator.validate(r.json['access'])
def assertValidScopedTokenResponse(self, r):
v2.scoped_validator.validate(r.json['access'])
# Used by RestfulTestCase
def _get_token_id(self, r):
return r.result['access']['token']['id']
def new_project_ref(self):
return {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': 'default',
'enabled': True}
def config_overrides(self):
super(TestFernetTokenProviderV2, self).config_overrides()
self.config_fixture.config(group='token', provider='fernet')
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
def test_authenticate_unscoped_token(self):
unscoped_token = self.get_unscoped_token()
        # Fernet tokens must be shorter than 255 characters to satisfy the
        # usability requirements
self.assertLess(len(unscoped_token), 255)
def test_validate_unscoped_token(self):
# Grab an admin token to validate with
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
unscoped_token = self.get_unscoped_token()
path = ('/v2.0/tokens/%s' % unscoped_token)
resp = self.admin_request(
method='GET',
path=path,
token=admin_token,
expected_status=http_client.OK)
self.assertValidUnscopedTokenResponse(resp)
def test_authenticate_scoped_token(self):
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project_ref['id'], self.role_service['id'])
token = self.get_scoped_token(tenant_id=project_ref['id'])
        # Fernet tokens must be shorter than 255 characters to satisfy the
        # usability requirements
self.assertLess(len(token), 255)
def test_validate_scoped_token(self):
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
project2_ref = self.new_project_ref()
self.resource_api.create_project(project2_ref['id'], project2_ref)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project2_ref['id'], self.role_member['id'])
admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
member_token = self.get_scoped_token(tenant_id=project2_ref['id'])
path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
project2_ref['id']))
# Validate token belongs to project
resp = self.admin_request(
method='GET',
path=path,
token=admin_token,
expected_status=http_client.OK)
self.assertValidScopedTokenResponse(resp)
def test_token_authentication_and_validation(self):
"""Test token authentication for Fernet token provider.
        Verify that token authentication returns a valid response code and
        that the resulting token belongs to the project.
"""
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
unscoped_token = self.get_unscoped_token()
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
token_id = unscoped_token
if six.PY2:
token_id = token_id.encode('ascii')
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'tenantName': project_ref['name'],
'token': {
'id': token_id,
}
}
},
expected_status=http_client.OK)
token_id = self._get_token_id(r)
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
# Validate token belongs to project
resp = self.admin_request(
method='GET',
path=path,
token=self.get_admin_token(),
expected_status=http_client.OK)
self.assertValidScopedTokenResponse(resp)
def test_rescoped_tokens_maintain_original_expiration(self):
project_ref = self.new_project_ref()
self.resource_api.create_project(project_ref['id'], project_ref)
self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
project_ref['id'],
self.role_admin['id'])
resp = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'tenantName': project_ref['name'],
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
}
},
# NOTE(lbragstad): This test may need to be refactored if Keystone
# decides to disallow rescoping using a scoped token.
expected_status=http_client.OK)
original_token = resp.result['access']['token']['id']
original_expiration = resp.result['access']['token']['expires']
resp = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'tenantName': project_ref['name'],
'token': {
'id': original_token,
}
}
},
expected_status=http_client.OK)
rescoped_token = resp.result['access']['token']['id']
rescoped_expiration = resp.result['access']['token']['expires']
self.assertNotEqual(original_token, rescoped_token)
self.assertEqual(original_expiration, rescoped_expiration)
self.assertValidScopedTokenResponse(resp)
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify_libcloud import (CosmoOnLibcloudDriver,
LibcloudKeypairController,
LibcloudSGController,
LibcloudFloatingIpController,
LibcloudServerController,
LibcloudValidator)
from libcloud.compute.types import NodeState
from os.path import expanduser
import os
import time
from fabric.api import put, env
from libcloud.compute.types import KeyPairDoesNotExistError
import tempfile
import shutil
from fabric.context_managers import settings
import errno
import json
CREATE_IF_MISSING = 'create_if_missing'
TIMEOUT = 3000
# declare which ports should be opened during provisioning
EXTERNAL_MGMT_PORTS = (22, 8100, 80) # SSH, REST service (TEMP), REST and UI
INTERNAL_MGMT_PORTS = (5555, 5672, 53229, 8101) # Riemann, RabbitMQ, FileServer, Internal REST # NOQA
INTERNAL_AGENT_PORTS = (22,)
class EC2CosmoOnLibcloudDriver(CosmoOnLibcloudDriver):
def __init__(self, provider_config, provider_context, connector):
super(EC2CosmoOnLibcloudDriver, self)\
.__init__(provider_config, provider_context)
self.keypair_controller = EC2LibcloudKeypairController(connector)
self.sg_controller = EC2LibcloudSGController(connector)
self.floating_ip_controller =\
EC2LibcloudFloatingIpController(connector)
self.util_controller = EC2LibcloudUtilController(connector)
self.server_controller = EC2LibcloudServerController(
connector, util_controller=self.util_controller)
def copy_files_to_manager(self, mgmt_ip, ssh_key, ssh_user):
def _copy(userhome_on_management, agents_key_path, connection_config):
ssh_config = self.config['cloudify']['bootstrap']['ssh']
env.user = ssh_user
env.key_filename = ssh_key
env.abort_on_prompts = False
env.connection_attempts = ssh_config['connection_attempts']
env.keepalive = 0
env.linewise = False
env.pool_size = 0
env.skip_bad_hosts = False
env.timeout = ssh_config['socket_timeout']
env.forward_agent = True
env.status = False
env.disable_known_hosts = False
tempdir = tempfile.mkdtemp()
put(agents_key_path, userhome_on_management + '/.ssh')
connection_file_path = _make_json_file(tempdir,
'connection_config',
connection_config)
put(connection_file_path, userhome_on_management)
shutil.rmtree(tempdir)
def _make_json_file(tempdir, file_basename, data):
file_path = os.path.join(tempdir, file_basename + '.json')
with open(file_path, 'w') as f:
json.dump(data, f)
return file_path
compute_config = self.config['compute']
mgmt_server_config = compute_config['management_server']
with settings(host_string=mgmt_ip):
_copy(
mgmt_server_config['userhome_on_management'],
expanduser(compute_config['agent_servers']['agents_keypair'][
'private_key_path']), self.config['connection'])
def create_topology(self):
resources = {}
self.provider_context['resources'] = resources
compute_config = self.config['compute']
mng_conf = compute_config['management_server']
inst_conf = mng_conf['instance']
# Security group for Cosmo created instances
asgconf = self.config['networking']['agents_security_group']
description = 'Cosmo created machines'
asg, agent_sg_created = self.sg_controller\
.create_or_ensure_exists_log_resources(asgconf,
asgconf['name'],
resources,
'agents_security_group',
description=description)
asg_id = asg['group_id'] if agent_sg_created else asg.id
# Security group for Cosmo manager, allows created
# instances -> manager communication
msgconf = self.config['networking']['management_security_group']
sg_rules = \
[{'port': p, 'group_id': asg_id} for p in INTERNAL_MGMT_PORTS] + \
[{'port': p, 'cidr': msgconf['cidr']} for p in EXTERNAL_MGMT_PORTS]
rsrc_name = 'management_security_group'
description = 'Cosmo Manager'
msg, msg_created = self.sg_controller\
.create_or_ensure_exists_log_resources(msgconf,
msgconf['name'],
resources,
rsrc_name,
description=description,
rules=sg_rules)
msg_id = msg['group_id'] if msg_created else msg.id
# Add rules to agent security group. (Happens here because we need
# the management security group id)
if agent_sg_created:
self.sg_controller.add_rules(
[{'port': port, 'group_id': msg_id}
for port in INTERNAL_AGENT_PORTS], asg_id)
# Keypairs setup
mgr_kp_conf = mng_conf['management_keypair']
self.keypair_controller.create_or_ensure_exists_log_resources(
mgr_kp_conf,
mgr_kp_conf['name'],
resources,
'management_keypair',
private_key_path=mgr_kp_conf['private_key_path']
)
agents_kp_conf = compute_config['agent_servers']['agents_keypair']
self.keypair_controller.create_or_ensure_exists_log_resources(
agents_kp_conf,
agents_kp_conf['name'],
resources,
'agents_keypair',
private_key_path=agents_kp_conf['private_key_path']
)
node, created = self.server_controller.\
create_or_ensure_exists_log_resources(
inst_conf,
inst_conf['name'],
resources,
'management_server',
image=inst_conf['image'],
size=inst_conf['size'],
keypair_name=mgr_kp_conf['name'],
security_groups=[msgconf['name']]
)
if 'floating_ip' in mng_conf:
floating_ip_conf = mng_conf['floating_ip']
res_name = 'management_floating_ip'
ip_name = floating_ip_conf['ip']\
if 'ip' in floating_ip_conf else res_name
floating_ip, created = self.floating_ip_controller\
.create_or_ensure_exists_log_resources(floating_ip_conf,
ip_name,
resources,
res_name)
self.floating_ip_controller.associate(node, floating_ip)
node = self.server_controller.get_by_id(node.id)
            while not node.public_ips[0] == floating_ip.ip:
                self.floating_ip_controller.associate(node, floating_ip)
                # re-read the node so the loop can observe the newly
                # associated address instead of spinning forever
                node = self.server_controller.get_by_id(node.id)
ssh_key = expanduser(mgr_kp_conf['private_key_path'])
ssh_user = mng_conf['user_on_management']
node = self.server_controller.get_by_id(node.id)
public_ip = node.public_ips[0]
private_ip = node.private_ips[0]
return public_ip, private_ip, ssh_key, ssh_user, self.provider_context
def _delete_resources(self, resources):
deleted_resources = []
not_found_resources = []
failed_to_delete_resources = []
def del_server_resource(resource_name, resource_data):
if resource_data['created']:
resource =\
self.server_controller.get_by_id(resource_data['id'])
if resource is None:
not_found_resources.append(resource_data)
else:
try:
self.server_controller.kill(resource)
deleted_resources.append(resource_data)
del(resources[resource_name])
except:
failed_to_delete_resources.append(resource_data)
def del_floating_ip_resource(resource_name, resource_data):
if resource_data['created']:
resource = self.floating_ip_controller\
.get_by_id(resource_data['id'])
if resource is None:
not_found_resources.append(resource_data)
else:
try:
self.floating_ip_controller.kill(resource)
deleted_resources.append(resource_data)
del(resources[resource_name])
except:
failed_to_delete_resources.append(resource_data)
def del_security_group_resources(sg_resources):
to_delete = []
for key, value in sg_resources.items():
if value['created']:
resource = self.sg_controller.get_by_id(value['id'])
if resource is None:
not_found_resources.append(value)
else:
try:
self.sg_controller.remove_rules(resource)
to_delete.append({'key': key,
'value': value,
'resource': resource})
except:
failed_to_delete_resources.append(value)
for item in to_delete:
try:
self.sg_controller.kill(item['resource'])
deleted_resources.append(item['value'])
del(resources[item['key']])
except Exception:
failed_to_delete_resources.append(item['value'])
def del_key_pair_resource(resource_name, resource_data):
if resource_data['created']:
resource = self.keypair_controller\
.get_by_id(resource_data['id'])
if resource is None:
not_found_resources.append(resource_data)
else:
try:
self.keypair_controller.kill(resource)
deleted_resources.append(resource_data)
del(resources[resource_name])
except:
failed_to_delete_resources.append(resource_data)
# deleting in reversed order to creation order
server_resources = {}
floating_ip_resources = {}
security_group_resources = {}
key_pair_resources = {}
for key, value in resources.items():
resource_type = value['type']
if resource_type == 'key_pair':
key_pair_resources[key] = value
elif resource_type == 'security_group':
security_group_resources[key] = value
elif resource_type == 'floating_ip':
floating_ip_resources[key] = value
elif resource_type == 'server':
server_resources[key] = value
for key, value in server_resources.items():
del_server_resource(key, value)
for key, value in floating_ip_resources.items():
del_floating_ip_resource(key, value)
del_security_group_resources(security_group_resources)
for key, value in key_pair_resources.items():
del_key_pair_resource(key, value)
return (deleted_resources, not_found_resources,
failed_to_delete_resources)
class EC2LibcloudKeypairController(LibcloudKeypairController):
def _ensure_exist(self, name):
keypair = None
try:
keypair = self.driver.get_key_pair(name)
except KeyPairDoesNotExistError:
pass
if keypair:
return keypair.name, keypair
else:
return None, None
def _create(self, name, private_key_path=None):
pk_target_path = expanduser(private_key_path)
if os.path.exists(pk_target_path):
raise RuntimeError("Can't create keypair {0} - local path for "
"private key already exists: {1}"
.format(name, pk_target_path))
keypair = self.driver.create_key_pair(name)
self._mkdir_p(os.path.dirname(pk_target_path))
with open(pk_target_path, 'w') as f:
f.write(keypair.private_key)
os.system('chmod 600 {0}'.format(pk_target_path))
return name, keypair
def _mkdir_p(self, path):
path = expanduser(path)
try:
os.makedirs(path)
except OSError, exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return
raise
def get_by_id(self, ident):
key_pair_id, key_pair = self._ensure_exist(ident)
return key_pair
def kill(self, item):
self.driver.delete_key_pair(item)
def list(self):
return self.driver.list_key_pairs()
class EC2LibcloudSGController(LibcloudSGController):
def _ensure_exist(self, name):
try:
security_group = self.driver\
.ex_get_security_groups(group_names=[name])
if security_group and security_group[0]:
return security_group[0].id, security_group[0]
except Exception:
pass
return None, None
def add_rules(self, rules, security_group_id):
for rule in rules:
if 'cidr' in rule:
self.driver.ex_authorize_security_group_ingress(
security_group_id,
rule['port'],
rule['port'],
cidr_ips=[rule['cidr']])
elif 'group_id' in rule:
self.driver.ex_authorize_security_group_ingress(
security_group_id,
rule['port'],
rule['port'],
group_pairs=[{'group_id': rule['group_id']}])
def _create(self, name, description=None, rules=None):
if not description:
raise RuntimeError("Must provide description"
" to create security group")
security_group = self.driver.ex_create_security_group(
name,
description)
if security_group and rules:
self.add_rules(rules, security_group['group_id'])
return security_group['group_id'], security_group
def get_by_id(self, ident):
result = self.driver.ex_get_security_groups(group_ids=[ident])
if result:
return result[0]
def get_by_name(self, name):
group_id, group = self._ensure_exist(name)
return group
def remove_rules(self, item):
for rule in item.ingress_rules:
for pair in rule['group_pairs']:
if ('group_id' in pair) and ('group_name' in pair):
pair['group_name'] = ''
self.driver.ex_revoke_security_group_ingress(
id=item.id,
from_port=rule['from_port'],
to_port=rule['to_port'],
group_pairs=rule['group_pairs'],
cidr_ips=rule['cidr_ips'])
for rule in item.egress_rules:
for pair in rule['group_pairs']:
if ('group_id' in pair) and ('group_name' in pair):
pair['group_name'] = ''
self.driver.ex_revoke_security_group_egress(
id=item.id,
from_port=rule['from_port'],
to_port=rule['to_port'],
group_pairs=rule['group_pairs'],
cidr_ips=rule['cidr_ips'])
def kill(self, item):
self.driver.ex_delete_security_group_by_id(item.id)
def list(self):
return self.driver.ex_list_security_groups()
class EC2LibcloudFloatingIpController(LibcloudFloatingIpController):
def _ensure_exist(self, name):
addresses = self.driver.ex_describe_all_addresses()
if addresses:
for item in addresses:
if item.ip.lower() == name.lower():
return item.ip, item
return None, None
def _create(self, name):
address = self.driver.ex_allocate_address()
return address.ip, address
def associate(self, node, ip):
self.driver.ex_associate_address_with_node(node, ip)
def get_by_id(self, ident):
ip_id, ip = self._ensure_exist(ident)
return ip
def kill(self, item):
self.driver.ex_disassociate_address(item)
self.driver.ex_release_address(item)
def is_quota_exceeded(self):
limits = self.driver.ex_get_limits()
elastic_ip_limit = limits['resource']['max-elastic-ips']
total_elastic_ips = len(self.driver.ex_describe_all_addresses())
if total_elastic_ips >= elastic_ip_limit:
return True
return False
class EC2LibcloudServerController(LibcloudServerController):
def __init__(self, connector, util_controller=None):
        super(EC2LibcloudServerController, self).__init__(connector)
self.util_controller = util_controller
def _ensure_exist(self, name):
nodes = self.driver.list_nodes()
if nodes:
for node in nodes:
if (node.name.lower() == name.lower())\
and (node.state is NodeState.RUNNING):
return node.id, node
return None, None
def _create(
self,
name,
image=None,
size=None,
keypair_name=None,
security_groups=None):
selected_size = self.util_controller.get_size(size)
selected_image = self.util_controller.get_image(image)
node = self.driver.create_node(name=name,
image=selected_image,
size=selected_size,
ex_keyname=keypair_name,
ex_security_groups=security_groups)
node = self._wait_for_node_to_has_state(node, NodeState.RUNNING)
return node.id, node
def _wait_for_node_to_has_state(self, node, state):
timeout = TIMEOUT
while node.state is not state:
timeout -= 5
if timeout <= 0:
raise RuntimeError('Node failed to obtain state {0} in time'
.format(state))
time.sleep(5)
node = self.get_by_id(node.id)
return node
def get_by_id(self, ident):
result = self.driver.list_nodes(ex_node_ids=[ident])
if result:
return result[0]
def get_by_name(self, name):
nodes = self.driver.list_nodes()
if nodes:
for node in nodes:
if node.name.lower() == name.lower():
return node
def kill(self, item):
self.driver.destroy_node(item)
self._wait_for_node_to_has_state(item, NodeState.TERMINATED)
def list(self):
return self.driver.list_nodes()
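# --- Hedged sketch: the poll-until-state pattern used by
# _wait_for_node_to_has_state above, written as a standalone helper. 'fetch'
# and 'predicate' are illustrative callables, not part of this module's API;
# the helper reuses the module-level 'time' import and TIMEOUT constant.
def wait_until(fetch, predicate, timeout=TIMEOUT, interval=5):
    obj = fetch()
    while not predicate(obj):
        timeout -= interval
        if timeout <= 0:
            raise RuntimeError('Timed out waiting for the desired state')
        time.sleep(interval)
        obj = fetch()
    return obj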
class EC2LibcloudUtilController(object):
def __init__(self, connector):
self.driver = connector.get_driver()
def get_size(self, name):
sizes = self.driver.list_sizes()
if sizes:
for item in sizes:
if item.id.lower() == name.lower():
return item
def get_image(self, name):
images = self.driver.list_images(ex_image_ids=[name])
if images:
if images[0]:
return images[0]
class EC2LibcloudValidator(LibcloudValidator):
def __init__(self,
provider_config,
validation_errors,
lgr,
util_controller=None,
floating_ip_controller=None,
server_controller=None):
super(EC2LibcloudValidator, self)\
.__init__(provider_config, validation_errors, lgr)
self.util_controller = util_controller
self.floating_ip_controller = floating_ip_controller
self.server_controller = server_controller
def _validate_connection(self, connection_config):
if 'access_id' not in connection_config:
err = 'config file validation error: connection:' \
' access_id should be set for EC2 cloud'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('connection', []).append(err)
if 'secret_key' not in connection_config:
err = 'config file validation error: connection:' \
' secret_key should be set for EC2 cloud'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('connection', []).append(err)
def _validate_networking(self, networking_config):
cidr = networking_config['management_security_group']['cidr']
if not self.validate_cidr_syntax(cidr):
err = 'config file validation error:' \
' networking/management_security_group:' \
' cidr wrong format'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('networking', []).append(err)
def _validate_floating_ip(self, mng_config):
ip_config = mng_config['floating_ip']
if CREATE_IF_MISSING not in ip_config:
err = 'config file validation error:' \
' management_server/floating_ip:' \
' create_if_missing should be set for EC2 cloud'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
else:
if not ip_config[CREATE_IF_MISSING]:
if 'ip' not in ip_config:
err = 'config file validation error:' \
' management_server/floating_ip:' \
' ip should be set for EC2 cloud'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
else:
ip = ip_config['ip']
if not self.validate_cidr_syntax(ip):
err = 'config file validation error:' \
' management_server/floating_ip:' \
' ip wrong format'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors\
.setdefault('management_server', []).append(err)
if not self.floating_ip_controller.get(ip):
err = 'config file validation error:' \
' management_server/floating_ip:' \
' can\'t find ip {0} on EC2'.format(ip)
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors\
.setdefault('management_server', []).append(err)
else:
quota_exceeded = self.floating_ip_controller\
.is_quota_exceeded()
if quota_exceeded:
err = 'config file validation error:' \
' resource elastic-ip quota limit exceeded:' \
' can\'t allocate new elastic-ip'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors \
.setdefault('management_server', []).append(err)
def _validate_instance(self, instance_config):
if 'size' not in instance_config:
err = 'config file validation error:' \
' management_server/instance:' \
' size should be set for EC2 cloud'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
image_name = instance_config['image']
image = self.util_controller.get_image(image_name)
if not image:
err = 'config file validation error:' \
' management_server/instance:' \
' image \'{0}\' does not exist on EC2'\
.format(image_name)
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
size_name = instance_config['size']
size = self.util_controller.get_size(size_name)
if not size:
err = 'config file validation error:' \
' management_server/instance:' \
' size \'{0}\' does not exist on EC2'\
.format(size_name)
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
instance_name = instance_config['name']
instance = self.server_controller.get_by_name(instance_name)
if instance and\
(instance.state not in [NodeState.RUNNING,
NodeState.TERMINATED]):
err = 'config file validation error:' \
' management_server should be in state Running'
self.lgr.error('VALIDATION ERROR: ' + err)
self.validation_errors.setdefault('management_server', [])\
.append(err)
def _validate_compute(self, compute_config):
mng_config = compute_config['management_server']
if 'floating_ip' in mng_config:
self._validate_floating_ip(mng_config)
instance_config = mng_config['instance']
self._validate_instance(instance_config)
def validate(self):
connection_config = self.provider_config['connection']
self._validate_connection(connection_config)
networking_config = self.provider_config['networking']
self._validate_networking(networking_config)
compute_config = self.provider_config['compute']
self._validate_compute(compute_config)
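# --- Hedged sketch: the private-key persistence pattern used by
# EC2LibcloudKeypairController._create above, as a standalone helper. It
# reuses os/errno/expanduser imported at the top of this module and calls
# os.chmod directly instead of shelling out to 'chmod 600'.
def save_private_key(private_key_material, private_key_path):
    target = expanduser(private_key_path)
    parent = os.path.dirname(target)
    try:
        os.makedirs(parent)
    except OSError as exc:
        # tolerate an already existing directory, re-raise anything else
        if not (exc.errno == errno.EEXIST and os.path.isdir(parent)):
            raise
    with open(target, 'w') as f:
        f.write(private_key_material)
    os.chmod(target, 0o600)
    return target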
|
|
"""An abstract class for entities."""
from abc import ABC
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
from timeit import default_timer as timer
from typing import Any, Awaitable, Dict, Iterable, List, Optional
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import Event, async_track_entity_registry_updated_event
from homeassistant.helpers.typing import StateType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> Dict[str, Dict[str, str]]:
"""Get the entity sources."""
return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[List[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[Iterable[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if current_ids is not None:
return ensure_unique_string(preferred_string, current_ids)
if hass is None:
raise ValueError("Missing required parameter current_ids or hass")
test_string = preferred_string
tries = 1
while hass.states.get(test_string):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
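# --- Hedged illustration of the entity-id convention implemented above (not
# used elsewhere in this module). Assuming homeassistant's slugify behaviour,
# "My Sensor" with format "sensor.{}" yields "sensor.my_sensor", and a clash
# with an existing id is resolved by ensure_unique_string with a numeric
# suffix such as "sensor.my_sensor_2".
def _example_generate_entity_id() -> str:
    return async_generate_entity_id(
        "sensor.{}", "My Sensor", current_ids=["sensor.my_sensor"]
    )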
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
entity_id = None # type: str
# Owning hass instance. Will be set by EntityPlatform
hass: Optional[HomeAssistant] = None
# Owning platform instance. Will be set by EntityPlatform
platform: Optional[EntityPlatform] = None
# If we reported if this entity was slow
_slow_reported = False
# If we reported this entity is updated while disabled
_disabled_reported = False
# Protect for multiple updates
_update_staged = False
# Process updates in parallel
parallel_updates: Optional[asyncio.Semaphore] = None
# Entry in the entity registry
registry_entry: Optional[RegistryEntry] = None
# Hold list for functions to call on remove.
_on_remove: Optional[List[CALLBACK_TYPE]] = None
# Context
_context: Optional[Context] = None
_context_set: Optional[datetime] = None
# If entity is added to an entity platform
_added = False
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return None
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return None
@property
def state(self) -> StateType:
"""Return the state of the entity."""
return STATE_UNKNOWN
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return device specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Return device specific attributes.
Implemented by platform classes.
"""
return None
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self) -> Optional[str]:
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return False
@property
def force_update(self) -> bool:
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return False
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return None
@property
def context_recent_time(self) -> timedelta:
"""Time that a context is considered recent."""
return timedelta(seconds=5)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return True
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
"""Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
"""
return self.registry_entry is None or not self.registry_entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
"""Set the context the entity currently operates under."""
self._context = context
self._context_set = dt_util.utcnow()
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
"""Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop.
"""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
# update entity data
if force_refresh:
try:
await self.async_device_update()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Update for %s fails", self.entity_id)
return
self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
self._async_write_ha_state()
@callback
def _async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.registry_entry and self.registry_entry.disabled_by:
if not self._disabled_reported:
self._disabled_reported = True
assert self.platform is not None
_LOGGER.warning(
"Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
self.entity_id,
self.platform.platform_name,
)
return
start = timer()
attr = self.capability_attributes
attr = dict(attr) if attr else {}
if not self.available:
state = STATE_UNAVAILABLE
else:
sstate = self.state
state = STATE_UNKNOWN if sstate is None else str(sstate)
attr.update(self.state_attributes or {})
attr.update(self.device_state_attributes or {})
unit_of_measurement = self.unit_of_measurement
if unit_of_measurement is not None:
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
# pylint: disable=consider-using-ternary
name = (entry and entry.name) or self.name
if name is not None:
attr[ATTR_FRIENDLY_NAME] = name
icon = (entry and entry.icon) or self.icon
if icon is not None:
attr[ATTR_ICON] = icon
entity_picture = self.entity_picture
if entity_picture is not None:
attr[ATTR_ENTITY_PICTURE] = entity_picture
assumed_state = self.assumed_state
if assumed_state:
attr[ATTR_ASSUMED_STATE] = assumed_state
supported_features = self.supported_features
if supported_features is not None:
attr[ATTR_SUPPORTED_FEATURES] = supported_features
device_class = self.device_class
if device_class is not None:
attr[ATTR_DEVICE_CLASS] = str(device_class)
end = timer()
if end - start > 0.4 and not self._slow_reported:
self._slow_reported = True
extra = ""
if "custom_components" in type(self).__module__:
extra = "Please report it to the custom component author."
else:
extra = (
"Please create a bug report at "
"https://github.com/home-assistant/home-assistant/issues?q=is%3Aopen+is%3Aissue"
)
if self.platform:
extra += (
f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
)
_LOGGER.warning(
"Updating state for %s (%s) took %.3f seconds. %s",
self.entity_id,
type(self),
end - start,
extra,
)
# Overwrite properties that have been set in the config file.
assert self.hass is not None
if DATA_CUSTOMIZE in self.hass.data:
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
# Convert temperature if we detect one
try:
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if (
unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
and unit_of_measure != units.temperature_unit
):
prec = len(state) - state.index(".") - 1 if "." in state else 0
temp = units.temperature(float(state), unit_of_measure)
state = str(round(temp) if prec == 0 else round(temp, prec))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
# Could not convert state to float
pass
if (
self._context_set is not None
and dt_util.utcnow() - self._context_set > self.context_recent_time
):
self._context = None
self._context_set = None
self.hass.states.async_set(
self.entity_id, state, attr, self.force_update, self._context
)
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
assert self.hass is not None
self.hass.add_job(self.async_update_ha_state(force_refresh)) # type: ignore
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
if force_refresh:
assert self.hass is not None
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state()
async def async_device_update(self, warning: bool = True) -> None:
"""Process 'update' or 'async_update' from entity.
This method is a coroutine.
"""
if self._update_staged:
return
self._update_staged = True
# Process update sequential
if self.parallel_updates:
await self.parallel_updates.acquire()
assert self.hass is not None
if warning:
update_warn = self.hass.loop.call_later(
SLOW_UPDATE_WARNING,
_LOGGER.warning,
"Update of %s is taking over %s seconds",
self.entity_id,
SLOW_UPDATE_WARNING,
)
try:
# pylint: disable=no-member
if hasattr(self, "async_update"):
await self.async_update() # type: ignore
elif hasattr(self, "update"):
await self.hass.async_add_executor_job(self.update) # type: ignore
finally:
self._update_staged = False
if warning:
update_warn.cancel()
if self.parallel_updates:
self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when entity removed."""
if self._on_remove is None:
self._on_remove = []
self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
"""Run when entity has been removed from entity registry.
To be extended by integrations.
"""
@callback
def add_to_platform_start(
self,
hass: HomeAssistant,
platform: EntityPlatform,
parallel_updates: Optional[asyncio.Semaphore],
) -> None:
"""Start adding an entity to a platform."""
if self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} cannot be added a second time to an entity platform"
)
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._added = True
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
self.hass = None
self.platform = None
self.parallel_updates = None
self._added = False
async def add_to_platform_finish(self) -> None:
"""Finish adding an entity to a platform."""
await self.async_internal_added_to_hass()
await self.async_added_to_hass()
self.async_write_ha_state()
async def async_remove(self) -> None:
"""Remove entity from Home Assistant."""
assert self.hass is not None
if self.platform and not self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} async_remove called twice"
)
self._added = False
if self._on_remove is not None:
while self._on_remove:
self._on_remove.pop()()
await self.async_internal_will_remove_from_hass()
await self.async_will_remove_from_hass()
self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
To be extended by integrations.
"""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
To be extended by integrations.
"""
async def async_internal_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Not to be extended by integrations.
"""
assert self.hass is not None
if self.platform:
info = {"domain": self.platform.platform_name}
if self.platform.config_entry:
info["source"] = SOURCE_CONFIG_ENTRY
info["config_entry"] = self.platform.config_entry.entry_id
else:
info["source"] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if self.registry_entry is not None:
# This is an assert as it should never happen, but helps in tests
assert (
not self.registry_entry.disabled_by
), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(
async_track_entity_registry_updated_event(
self.hass, self.entity_id, self._async_registry_updated
)
)
async def async_internal_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Not to be extended by integrations.
"""
if self.platform:
assert self.hass is not None
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
"""Handle entity registry update."""
data = event.data
if data["action"] == "remove":
await self.async_removed_from_registry()
await self.async_remove()
if data["action"] != "update":
return
assert self.hass is not None
ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data["entity_id"])
assert self.registry_entry is not None
if self.registry_entry.disabled_by is not None:
await self.async_remove()
return
assert old is not None
if self.registry_entry.entity_id == old.entity_id:
self.async_write_ha_state()
return
await self.async_remove()
assert self.platform is not None
self.entity_id = self.registry_entry.entity_id
await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if not isinstance(other, self.__class__):
return False
# Can only decide equality if both have a unique id
if self.unique_id is None or other.unique_id is None:
return False
# Ensure they belong to the same platform
if self.platform is not None or other.platform is not None:
if self.platform is None or other.platform is None:
return False
if self.platform.platform != other.platform.platform:
return False
return self.unique_id == other.unique_id
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}: {self.state}>"
async def async_request_call(self, coro: Awaitable) -> None:
"""Process request batched."""
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
await coro
finally:
if self.parallel_updates:
self.parallel_updates.release()
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
@property
def state(self) -> str:
"""Return the state."""
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
raise NotImplementedError()
def turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))
def toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
async def async_toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
await self.async_turn_off(**kwargs)
else:
await self.async_turn_on(**kwargs)
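# --- Hedged usage sketch, not part of Home Assistant itself: a minimal,
# hypothetical push-style entity built on the Entity base class above. The
# platform wiring (async_add_entities, config entries) is assumed and omitted.
class ExampleTemperatureSensor(Entity):
    """Illustrative entity returning a fixed temperature reading."""
    @property
    def name(self) -> Optional[str]:
        # Friendly name shown in the frontend.
        return "Example Temperature"
    @property
    def should_poll(self) -> bool:
        # This sketch pushes its own state, so polling is disabled.
        return False
    @property
    def state(self) -> StateType:
        # A static reading; a real integration would return live data here.
        return 21.5
    @property
    def unit_of_measurement(self) -> Optional[str]:
        return TEMP_CELSIUS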
|
|
#!/usr/bin/env python
"""
Copyright (c) 2019 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
module = 'axis_async_fifo_adapter'
testbench = 'test_%s_64_8' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/axis_adapter.v")
srcs.append("../rtl/axis_async_fifo.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
DEPTH = 32
S_DATA_WIDTH = 64
S_KEEP_ENABLE = (S_DATA_WIDTH>8)
    S_KEEP_WIDTH = (S_DATA_WIDTH//8)
M_DATA_WIDTH = 8
M_KEEP_ENABLE = (M_DATA_WIDTH>8)
    M_KEEP_WIDTH = (M_DATA_WIDTH//8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_ENABLE = 1
DEST_WIDTH = 8
USER_ENABLE = 1
USER_WIDTH = 1
PIPELINE_OUTPUT = 2
FRAME_FIFO = 0
USER_BAD_FRAME_VALUE = 1
USER_BAD_FRAME_MASK = 1
DROP_BAD_FRAME = 0
DROP_WHEN_FULL = 0
# Inputs
s_clk = Signal(bool(0))
s_rst = Signal(bool(0))
m_clk = Signal(bool(0))
m_rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_axis_tdata = Signal(intbv(0)[S_DATA_WIDTH:])
s_axis_tkeep = Signal(intbv(1)[S_KEEP_WIDTH:])
s_axis_tvalid = Signal(bool(0))
s_axis_tlast = Signal(bool(0))
s_axis_tid = Signal(intbv(0)[ID_WIDTH:])
s_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
s_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
m_axis_tready = Signal(bool(0))
# Outputs
s_axis_tready = Signal(bool(0))
m_axis_tdata = Signal(intbv(0)[M_DATA_WIDTH:])
m_axis_tkeep = Signal(intbv(1)[M_KEEP_WIDTH:])
m_axis_tvalid = Signal(bool(0))
m_axis_tlast = Signal(bool(0))
m_axis_tid = Signal(intbv(0)[ID_WIDTH:])
m_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
m_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
# sources and sinks
source_pause = Signal(bool(0))
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource()
source_logic = source.create_logic(
s_clk,
s_rst,
tdata=s_axis_tdata,
tkeep=s_axis_tkeep,
tvalid=s_axis_tvalid,
tready=s_axis_tready,
tlast=s_axis_tlast,
tid=s_axis_tid,
tdest=s_axis_tdest,
tuser=s_axis_tuser,
pause=source_pause,
name='source'
)
sink = axis_ep.AXIStreamSink()
sink_logic = sink.create_logic(
m_clk,
m_rst,
tdata=m_axis_tdata,
tkeep=m_axis_tkeep,
tvalid=m_axis_tvalid,
tready=m_axis_tready,
tlast=m_axis_tlast,
tid=m_axis_tid,
tdest=m_axis_tdest,
tuser=m_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
s_clk=s_clk,
s_rst=s_rst,
m_clk=m_clk,
m_rst=m_rst,
current_test=current_test,
s_axis_tdata=s_axis_tdata,
s_axis_tkeep=s_axis_tkeep,
s_axis_tvalid=s_axis_tvalid,
s_axis_tready=s_axis_tready,
s_axis_tlast=s_axis_tlast,
s_axis_tid=s_axis_tid,
s_axis_tdest=s_axis_tdest,
s_axis_tuser=s_axis_tuser,
m_axis_tdata=m_axis_tdata,
m_axis_tkeep=m_axis_tkeep,
m_axis_tvalid=m_axis_tvalid,
m_axis_tready=m_axis_tready,
m_axis_tlast=m_axis_tlast,
m_axis_tid=m_axis_tid,
m_axis_tdest=m_axis_tdest,
m_axis_tuser=m_axis_tuser
)
@always(delay(4))
def s_clkgen():
s_clk.next = not s_clk
@always(delay(5))
def m_clkgen():
m_clk.next = not m_clk
def wait_normal():
while s_axis_tvalid or m_axis_tvalid:
yield s_clk.posedge
def wait_pause_source():
while s_axis_tvalid or m_axis_tvalid:
yield s_clk.posedge
yield s_clk.posedge
source_pause.next = False
yield s_clk.posedge
source_pause.next = True
yield s_clk.posedge
source_pause.next = False
def wait_pause_sink():
while s_axis_tvalid or m_axis_tvalid:
sink_pause.next = True
yield s_clk.posedge
yield s_clk.posedge
yield s_clk.posedge
sink_pause.next = False
yield s_clk.posedge
@instance
def check():
yield delay(100)
yield s_clk.posedge
s_rst.next = 1
m_rst.next = 1
yield s_clk.posedge
yield s_clk.posedge
yield s_clk.posedge
s_rst.next = 0
m_rst.next = 0
yield s_clk.posedge
yield delay(100)
yield s_clk.posedge
for payload_len in range(1,18):
yield s_clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(
bytearray(range(payload_len)),
id=1,
dest=1,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame)
yield s_clk.posedge
yield s_clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame
assert sink.empty()
yield delay(100)
yield s_clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = axis_ep.AXIStreamFrame(
bytearray(range(payload_len)),
id=2,
dest=1,
)
test_frame2 = axis_ep.AXIStreamFrame(
bytearray(range(payload_len)),
id=2,
dest=2,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield s_clk.posedge
yield s_clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
assert sink.empty()
yield delay(100)
yield s_clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = axis_ep.AXIStreamFrame(
bytearray(range(payload_len)),
id=3,
dest=1,
last_cycle_user=1
)
test_frame2 = axis_ep.AXIStreamFrame(
bytearray(range(payload_len)),
id=3,
dest=2,
)
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source.send(test_frame1)
source.send(test_frame2)
yield s_clk.posedge
yield s_clk.posedge
yield wait()
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
assert rx_frame.last_cycle_user
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
assert sink.empty()
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
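# --- Hedged sketch, separate from the AXI stream tests above: the bare MyHDL
# clock-generator/checker pattern used by bench(), reduced to a standalone
# miniature bench. It relies only on the myhdl names imported at the top.
def _mini_bench():
    clk = Signal(bool(0))
    count = Signal(intbv(0)[8:])
    @always(delay(4))
    def clkgen():
        clk.next = not clk
    @instance
    def check():
        # count a handful of rising edges, then stop the simulation
        for _ in range(8):
            yield clk.posedge
            count.next = count + 1
        raise StopSimulation
    return clkgen, check
def run_mini_bench():
    Simulation(_mini_bench()).run()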
|
|
"""
This module implements a faster canvas for plotting.
it ovewrites some matplolib methods to allow printing on sys.platform=='win32'
"""
import wx
import sys
import logging
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.backend_bases import MouseEvent, RendererBase
from matplotlib.backends.backend_wx import GraphicsContextWx, PrintoutWx
from matplotlib.backends.backend_wx import RendererWx
logger = logging.getLogger(__name__)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the image instance into the current axes;
    :param x: the distance in pixels from the left hand side of the canvas
    :param y: the distance from the origin. That is, if origin is
        upper, y is the distance from top. If origin is lower, y
        is the distance from bottom.
    :param im: the :class:`matplotlib._image.Image` instance
    :param bbox: a :class:`matplotlib.transforms.Bbox` instance for clipping,
        or None
"""
pass
def select(self):
"""
"""
pass
def unselect(self):
"""
"""
pass
def OnPrintPage(self, page):
"""
override printPage of matplotlib
"""
self.canvas.draw()
dc = self.GetDC()
try:
(ppw, pph) = self.GetPPIPrinter() # printer's pixels per in
except:
ppw = 1
pph = 1
(pgw, _) = self.GetPageSizePixels() # page size in pixels
(dcw, _) = dc.GetSize()
(grw, _) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
# get margin in pixels = (margin in in) * (pixels/in)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
# (assuming grw is size of graph in inches....)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
    # this cute little number avoids API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
logger.error(sys.exc_value)
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
# # used to be self.canvas.figure.dpi.set( fig_dpi)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
GraphicsContextWx.select = select
GraphicsContextWx.unselect = unselect
PrintoutWx.OnPrintPage = OnPrintPage
RendererBase.draw_image = draw_image
class FigureCanvas(FigureCanvasWxAgg):
"""
Add features to the wx agg canvas for better support of AUI and
faster plotting.
"""
def __init__(self, *args, **kw):
super(FigureCanvas, self).__init__(*args, **kw)
self._isRendered = False
        # Create a timer for handling draw_idle requests
# If there are events pending when the timer is
# complete, reset the timer and continue. The
# alternative approach, binding to wx.EVT_IDLE,
# doesn't behave as nicely.
self.idletimer = wx.CallLater(1, self._onDrawIdle)
# panel information
self.panel = None
self.resizing = False
self.xaxis = None
self.yaxis = None
self.ndraw = 0
# Support for mouse wheel
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
def set_panel(self, panel):
"""
Set axes
"""
# set panel
self.panel = panel
# set axes
self.xaxis = panel.subplot.xaxis
self.yaxis = panel.subplot.yaxis
def draw_idle(self, *args, **kwargs):
"""
Render after a delay if no other render requests have been made.
"""
self.panel.subplot.grid(self.panel.grid_on)
if self.panel.legend is not None and self.panel.legend_pos_loc:
self.panel.legend._loc = self.panel.legend_pos_loc
self.idletimer.Restart(5, *args, **kwargs) # Delay by 5 ms
def _onDrawIdle(self, *args, **kwargs):
"""
"""
if False and wx.GetApp().Pending():
self.idletimer.Restart(5, *args, **kwargs)
else:
# Draw plot, changes resizing too
self.draw(*args, **kwargs)
self.resizing = False
def _get_axes_switch(self):
"""
"""
# Check resize whether or not True
if self.panel.dimension == 3:
return
# This is for fast response when plot is being resized
if not self.resizing:
self.xaxis.set_visible(True)
self.yaxis.set_visible(True)
self.panel.schedule_full_draw('del')
else:
self.xaxis.set_visible(False)
self.yaxis.set_visible(False)
self.panel.schedule_full_draw('append')
# set the resizing back to default= False
self.set_resizing(False)
def set_resizing(self, resizing=False):
"""
Setting the resizing
"""
self.resizing = resizing
self.panel.set_resizing(False)
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
# Only draw if window is shown, otherwise graph will bleed through
# on the notebook style AUI widgets.
# raise
fig = FigureCanvasWxAgg
if self.IsShownOnScreen() and self.ndraw != 1:
self._isRendered = True
self._get_axes_switch()
# import time
# st = time.time()
try:
fig.draw(self)
except ValueError:
logger.error(sys.exc_value)
else:
self._isRendered = False
if self.ndraw <= 1:
self.ndraw += 1
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
_, h = self.figure.canvas.get_width_height()
x = evt.GetX()
y = h - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
# print "delta,rotation,rate",delta,rotation,rate
step = rate * float(rotation) / delta
# Convert to mpl event
evt.Skip()
self.scroll_event(x, y, step, guiEvent=evt)
def scroll_event(self, x, y, step=1, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent
"""
button = 'up' if step >= 0 else 'down'
self._button = button
s = 'scroll_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
setattr(event, 'step', step)
self.callbacks.process(s, event)
if step != 0:
self.panel.is_zoomed = True
def _onRightButtonDown(self, evt):
"""
Overload the right button down call back to avoid a problem
with the context menu over matplotlib plots on linux.
:TODO: Investigate what the root cause of the problem is.
"""
if sys.platform == 'linux2' or self.panel.dimension == 3:
evt.Skip()
else:
FigureCanvasWxAgg._onRightButtonDown(self, evt)
# This solves the focusing on rightclick.
# Todo: better design
self.panel.parent.set_plot_unfocus()
self.panel.on_set_focus(None)
return
# CRUFT: wx 3.0.0.0 on OS X doesn't release the mouse on leaving window
def _onLeave(self, evt):
if self.HasCapture(): self.ReleaseMouse()
super(FigureCanvas, self)._onLeave(evt)
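# --- Hedged sketch: the debounced-redraw idea behind FigureCanvas.draw_idle
# above, factored into a small hypothetical helper. It coalesces bursts of
# requests into a single callback via wx.CallLater and assumes a running
# wx.App, since the timer needs an event loop to fire.
class DebouncedCall(object):
    def __init__(self, callback, delay_ms=5):
        self.callback = callback
        self.delay_ms = delay_ms
        self._timer = None
    def request(self, *args, **kwargs):
        # Restart the timer on every request; only the last request fires.
        if self._timer is None:
            self._timer = wx.CallLater(self.delay_ms, self.callback,
                                       *args, **kwargs)
        else:
            self._timer.Restart(self.delay_ms, *args, **kwargs)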
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import routes
import webob.dec
import webob.exc
import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova import flags
from nova import log as logging
import nova.policy
from nova import utils
from nova import wsgi as base_wsgi
LOG = logging.getLogger('nova.api.openstack.extensions')
FLAGS = flags.FLAGS
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The XML namespace for the extension, e.g.,
# 'http://www.fox.in.socks/api/ext/pie/v1.0'
namespace = None
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T13:25:27-06:00'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
@classmethod
def nsmap(cls):
"""Synthesize a namespace map from extension."""
# Start with a base nsmap
nsmap = ext_nsmap.copy()
# Add the namespace for the extension
nsmap[cls.alias] = cls.namespace
return nsmap
@classmethod
def xmlname(cls, name):
"""Synthesize element and attribute names."""
return '{%s}%s' % (cls.namespace, name)
def make_ext(elem):
elem.set('name')
elem.set('namespace')
elem.set('alias')
elem.set('updated')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extension', selector='extension')
make_ext(root)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extensions')
elem = xmlutil.SubTemplateElement(root, 'extension',
selector='extensions')
make_ext(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsResource, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
@wsgi.serializers(xml=ExtensionTemplate)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
@utils.deprecated("The extension middleware is no longer necessary.")
class ExtensionMiddleware(base_wsgi.Middleware):
"""Extensions middleware for WSGI.
Provided only for backwards compatibility with existing
api-paste.ini files. This middleware will be removed in future
versions of nova.
"""
pass
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See nova/tests/api/openstack/extensions/foxinsocks/extension.py for an
example extension implementation.
"""
    def __init__(self):
        # NOTE: minimal initialization sketch; the original module reads the
        # list of extension factory names to load from nova's configuration
        # (the exact flag attribute used here is an assumption).
        LOG.audit(_('Initializing extension manager.'))
        self.cls_list = FLAGS.osapi_extension
        self.extensions = {}
        self._load_extensions()
    def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
LOG.audit(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsResource(self)))
for ext in self.extensions.values():
try:
resources.extend(ext.get_resources())
except AttributeError:
                # NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.extensions.values():
try:
controller_exts.extend(ext.get_controller_extensions())
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
pass
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.name)
LOG.debug(_('Ext alias: %s'), extension.alias)
LOG.debug(_('Ext description: %s'),
' '.join(extension.__doc__.strip().split()))
LOG.debug(_('Ext namespace: %s'), extension.namespace)
LOG.debug(_('Ext updated: %s'), extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug(_("Loading extension %s"), ext_factory)
# Load the factory
factory = utils.import_class(ext_factory)
# Call it
LOG.debug(_("Calling extension factory %s"), ext_factory)
factory(self)
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warn(_('Failed to load extension %(ext_factory)s: '
'%(exc)s') % locals())
class ControllerExtension(object):
"""Extend core controllers of nova OpenStack API.
Provide a way to extend existing nova OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller, parent=None,
collection_actions=None, member_actions=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
def wrap_errors(fn):
"""Ensure errors are not passed along."""
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception:
raise webob.exc.HTTPInternalServerError()
return wrapped
def load_standard_extensions(ext_mgr, logger, path, package):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = ("%s%s.%s.%s%s" %
(package, relpkg, root,
root[0].upper(), root[1:]))
try:
ext_mgr.load_extension(classname)
except Exception as exc:
logger.warn(_('Failed to load extension %(classname)s: '
'%(exc)s') % locals())
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname,
'__init__.py')):
continue
# If it has extension(), delegate...
ext_name = ("%s%s.%s.extension" %
(package, relpkg, dname))
try:
ext = utils.import_class(ext_name)
except exception.ClassNotFound:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warn(_('Failed to load extension %(ext_name)s: '
'%(exc)s') % locals())
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
def authorize(context):
action = '%s_extension:%s' % (api_name, extension_name)
nova.policy.enforce(context, action, {})
return authorize
def soft_extension_authorizer(api_name, extension_name):
hard_authorize = extension_authorizer(api_name, extension_name)
def authorize(context):
try:
hard_authorize(context)
return True
except exception.NotAuthorized:
return False
return authorize
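# Illustrative note (a sketch, not code from nova itself): an extension would
# typically build one of these authorizers once at module level and consult it
# inside a controller method; the names below are hypothetical.
#
#   authorize = soft_extension_authorizer('compute', 'my_extension')
#
#   def show(self, req, id):
#       context = req.environ['nova.context']
#       if not authorize(context):
#           return  # silently skip the extension's extra data
#       ...
#
# With these arguments the policy action checked above would be
# 'compute_extension:my_extension'.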
|
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Generative Adversarial Model with two linear layers.
Example of how to create a GAN in T2T.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
def lrelu(input_, leak=0.2, name="lrelu"):
return tf.maximum(input_, leak * input_, name=name)
def deconv2d(
input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"):
"""Deconvolution layer."""
with tf.variable_scope(name):
w = tf.get_variable(
"w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
deconv = tf.nn.conv2d_transpose(
input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
biases = tf.get_variable(
"biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0))
return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
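# Note: with strides d_h = d_w = 2 (as used by the generator below), each
# deconv2d call roughly doubles the spatial dimensions, which is how the
# generator upsamples from (h/4, w/4) back to (h, w) over two layers.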
def reverse_gradient(x):
return -x + tf.stop_gradient(2 * x)
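# In the forward pass this evaluates to -x + 2x = x, i.e. the value is
# unchanged; in the backward pass tf.stop_gradient blocks the 2x term, so only
# -x is differentiated and the gradient flowing through is negated (the usual
# gradient-reversal trick).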
class AbstractGAN(t2t_model.T2TModel):
"""Base class for all GANs."""
def discriminator(self, x, is_training, reuse=False):
"""Discriminator architecture based on InfoGAN.
Args:
x: input images, shape [bs, h, w, channels]
is_training: boolean, are we in train or eval mode.
reuse: boolean, should params be re-used.
Returns:
out_logit: the output logits (before sigmoid).
"""
hparams = self.hparams
with tf.variable_scope(
"discriminator", reuse=reuse,
initializer=tf.random_normal_initializer(stddev=0.02)):
batch_size, height, width = common_layers.shape_list(x)[:3]
# Map x from [bs, h, w, c] down to a flat [bs, 1024] feature vector.
net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2),
padding="SAME", name="d_conv1")
# [bs, h/2, w/2, 64]
net = lrelu(net)
net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2),
padding="SAME", name="d_conv2")
# [bs, h/4, w/4, 128]
if hparams.discriminator_batchnorm:
net = tf.layers.batch_normalization(net, training=is_training,
momentum=0.999, name="d_bn2")
net = lrelu(net)
size = height * width
net = tf.reshape(net, [batch_size, size * 8]) # [bs, h * w * 8]
net = tf.layers.dense(net, 1024, name="d_fc3") # [bs, 1024]
if hparams.discriminator_batchnorm:
net = tf.layers.batch_normalization(net, training=is_training,
momentum=0.999, name="d_bn3")
net = lrelu(net)
return net
def generator(self, z, is_training, out_shape):
"""Generator outputting image in [0, 1]."""
hparams = self.hparams
height, width, c_dim = out_shape
batch_size = hparams.batch_size
with tf.variable_scope(
"generator",
initializer=tf.random_normal_initializer(stddev=0.02)):
net = tf.layers.dense(z, 1024, name="g_fc1")
net = tf.layers.batch_normalization(net, training=is_training,
momentum=0.999, name="g_bn1")
net = lrelu(net)
net = tf.layers.dense(net, 128 * (height // 4) * (width // 4),
name="g_fc2")
net = tf.layers.batch_normalization(net, training=is_training,
momentum=0.999, name="g_bn2")
net = lrelu(net)
net = tf.reshape(net, [batch_size, height // 4, width // 4, 128])
net = deconv2d(net, [batch_size, height // 2, width // 2, 64],
4, 4, 2, 2, name="g_dc3")
net = tf.layers.batch_normalization(net, training=is_training,
momentum=0.999, name="g_bn3")
net = lrelu(net)
net = deconv2d(net, [batch_size, height, width, c_dim],
4, 4, 2, 2, name="g_dc4")
out = tf.nn.sigmoid(net)
return common_layers.convert_real_to_rgb(out)
def losses(self, inputs, generated):
"""Return the losses dictionary."""
raise NotImplementedError
def body(self, features):
"""Body of the model.
Args:
features: a dictionary with the tensors.
Returns:
A pair (predictions, losses) where predictions is the generated image
and losses is a dictionary of losses (that get added for the final loss).
"""
features["targets"] = features["inputs"]
is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
# Input images.
inputs = tf.to_float(features["targets_raw"])
# Noise vector.
z = tf.random_uniform([self.hparams.batch_size,
self.hparams.bottleneck_bits],
minval=-1, maxval=1, name="z")
# Generator output: fake images.
out_shape = common_layers.shape_list(inputs)[1:4]
g = self.generator(z, is_training, out_shape)
losses = self.losses(inputs, g) # pylint: disable=not-callable
summary_g_image = tf.reshape(
g[0, :], [1] + common_layers.shape_list(inputs)[1:])
tf.summary.image("generated", summary_g_image, max_outputs=1)
if is_training:  # Returns a dummy output and the losses dictionary.
return tf.zeros_like(inputs), losses
return tf.reshape(g, tf.shape(inputs)), losses
def top(self, body_output, features):
"""Override the top function to not do anything."""
return body_output
@registry.register_model
class SlicedGan(AbstractGAN):
"""Sliced GAN for demonstration."""
def losses(self, inputs, generated):
"""Losses in the sliced case."""
is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
def discriminate(x):
return self.discriminator(x, is_training=is_training, reuse=False)
generator_loss = common_layers.sliced_gan_loss(
inputs, reverse_gradient(generated), discriminate,
self.hparams.num_sliced_vecs)
return {"training": - generator_loss}
def infer(self, *args, **kwargs): # pylint: disable=arguments-differ
del args, kwargs
try:
num_channels = self.hparams.problem.num_channels
except AttributeError:
num_channels = 1
with tf.variable_scope("body/vanilla_gan", reuse=tf.AUTO_REUSE):
hparams = self.hparams
z = tf.random_uniform([hparams.batch_size, hparams.bottleneck_bits],
minval=-1, maxval=1, name="z")
out_shape = (hparams.sample_height, hparams.sample_width, num_channels)
g_sample = self.generator(z, False, out_shape)
return g_sample
@registry.register_hparams
def sliced_gan():
"""Basic parameters for a vanilla_gan."""
hparams = common_hparams.basic_params1()
hparams.optimizer = "Adam"
hparams.learning_rate_constant = 0.0002
hparams.learning_rate_warmup_steps = 500
hparams.learning_rate_schedule = "constant * linear_warmup"
hparams.label_smoothing = 0.0
hparams.batch_size = 128
hparams.hidden_size = 128
hparams.initializer = "uniform_unit_scaling"
hparams.initializer_gain = 1.0
hparams.weight_decay = 1e-6
hparams.kernel_height = 4
hparams.kernel_width = 4
hparams.bottleneck_bits = 128
hparams.add_hparam("discriminator_batchnorm", True)
hparams.add_hparam("num_sliced_vecs", 4096)
return hparams
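# Illustrative usage (not part of this file): once registered, the model and
# hparams set above would normally be picked up by name on the t2t-trainer
# command line, e.g.
#
#   t2t-trainer --model=sliced_gan --hparams_set=sliced_gan \
#       --problem=image_mnist --output_dir=/tmp/sliced_gan
#
# The problem name and output directory here are placeholders, not something
# this module prescribes.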
|
|
"""Manages Placeholders for Graph convolution networks.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.nn.copy import Input
from deepchem.feat.mol_graphs import ConvMol
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def merge_dicts(l):
"""Convenience function to merge list of dictionaries."""
merged = {}
for d in l:
merged = merge_two_dicts(merged, d)
return merged
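# For example, merge_dicts([{'a': 1}, {'b': 2}, {'b': 3}]) returns
# {'a': 1, 'b': 3}: later dictionaries win on key collisions because each one
# is applied with dict.update().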
class GraphTopology(object):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self, n_feat, name='topology', max_deg=10, min_deg=0):
"""
Note that batch size is not specified in a GraphTopology object. A batch
of molecules must be combined into a disconnected graph and fed to topology
directly to handle batches.
Parameters
----------
n_feat: int
Number of features per atom.
name: str, optional
Name of this manager.
max_deg: int, optional
Maximum #bonds for atoms in molecules.
min_deg: int, optional
Minimum #bonds for atoms in molecules.
"""
#self.n_atoms = n_atoms
self.n_feat = n_feat
self.name = name
self.max_deg = max_deg
self.min_deg = min_deg
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_feat),
name=self.name + '_atom_features')
self.deg_adj_lists_placeholders = [
tf.placeholder(
dtype='int32',
shape=(None, deg),
name=self.name + '_deg_adj' + str(deg))
for deg in range(1, self.max_deg + 1)
]
self.deg_slice_placeholder = tf.placeholder(
dtype='int32',
shape=(self.max_deg - self.min_deg + 1, 2),
name=self.name + '_deg_slice')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.deg_slice_placeholder, self.membership_placeholder]
self.topology += self.deg_adj_lists_placeholders
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_input_placeholders(self):
"""All placeholders.
Contains atom_features placeholder and topology placeholders.
"""
return self.inputs
def get_topology_placeholders(self):
"""Returns topology placeholders
Consists of deg_slice_placeholder, membership_placeholder, and the
deg_adj_list_placeholders.
"""
return self.topology
def get_atom_features_placeholder(self):
return self.atom_features_placeholder
def get_deg_adjacency_lists_placeholders(self):
return self.deg_adj_lists_placeholders
def get_deg_slice_placeholder(self):
return self.deg_slice_placeholder
def get_membership_placeholder(self):
return self.membership_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
Assigns the graph information in the array of ConvMol objects to the
placeholder tensors
params
------
batch : np.ndarray
Array of ConvMol objects
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Merge mol conv objects
batch = ConvMol.agglomerate_mols(batch)
atoms = batch.get_atom_features()
deg_adj_lists = [
batch.deg_adj_lists[deg] for deg in range(1, self.max_deg + 1)
]
# Generate dicts
deg_adj_dict = dict(
list(zip(self.deg_adj_lists_placeholders, deg_adj_lists)))
atoms_dict = {
self.atom_features_placeholder: atoms,
self.deg_slice_placeholder: batch.deg_slice,
self.membership_placeholder: batch.membership
}
return merge_dicts([atoms_dict, deg_adj_dict])
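# Illustrative usage (a sketch; `conv_mols`, `fetches` and `sess` are
# hypothetical names, not part of this module):
#
#   topology = GraphTopology(n_feat=75)
#   feed_dict = topology.batch_to_feed_dict(conv_mols)  # list of ConvMol
#   sess.run(fetches, feed_dict=feed_dict)
#
# The feed dict can also be merged with other feed dicts (e.g. labels) before
# being passed to session.run, as the docstring above notes.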
class DTNNGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
n_distance=100,
distance_min=-1.,
distance_max=18.,
name='DTNN_topology'):
"""
Parameters
----------
n_distance: int, optional
granularity of distance matrix
step size will be (distance_max-distance_min)/n_distance
distance_min: float, optional
minimum distance of atom pairs, default = -1 Angstrom
distance_max: float, optional
maximum distance of atom pairs, default = 18 Angstrom
"""
#self.n_atoms = n_atoms
self.name = name
self.n_distance = n_distance
self.distance_min = distance_min
self.distance_max = distance_max
self.step_size = (distance_max - distance_min) / n_distance
self.steps = np.array(
[distance_min + i * self.step_size for i in range(n_distance)])
self.steps = np.expand_dims(self.steps, 0)
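# steps is a (1, n_distance) grid of distance-bin centers; batch_to_feed_dict
# below expands each pairwise distance onto Gaussians centered at these points
# with width step_size, producing the n_distance-wide feature fed into
# distance_placeholder.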
self.atom_number_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_number')
self.distance_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_distance),
name=self.name + '_distance')
self.atom_membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_membership')
self.distance_membership_i_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_distance_membership_i')
self.distance_membership_j_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_distance_membership_j')
# Define the list of tensors to be used as topology
self.topology = [
self.distance_placeholder,
self.atom_membership_placeholder,
self.distance_membership_i_placeholder,
self.distance_membership_j_placeholder,
]
self.inputs = [self.atom_number_placeholder]
self.inputs += self.topology
def get_atom_number_placeholder(self):
return self.atom_number_placeholder
def get_distance_placeholder(self):
return self.distance_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of Coulomb Matrix into tensorflow feed_dict.
Assigns the atom number and distance info to the
placeholder tensors
params
------
batch : np.ndarray
Array of Coulomb Matrix
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
num_atoms = list(map(sum, batch.astype(bool)[:, :, 0]))
atom_number = [
np.round(
np.power(2 * np.diag(batch[i, :num_atoms[i], :num_atoms[i]]), 1 /
2.4)).astype(int) for i in range(len(num_atoms))
]
distance = []
atom_membership = []
distance_membership_i = []
distance_membership_j = []
start = 0
for im, molecule in enumerate(atom_number):
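# A Coulomb matrix stores Z_i * Z_j / d_ij off the diagonal, so dividing
# outer(Z, Z) by it recovers the pairwise distances d_ij; the diagonal is then
# overwritten with -100 so self-pairs fall far outside the Gaussian grid and
# contribute ~0 after the expansion below.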
distance_matrix = np.outer(
molecule, molecule) / batch[im, :num_atoms[im], :num_atoms[im]]
np.fill_diagonal(distance_matrix, -100)
distance.append(np.expand_dims(distance_matrix.flatten(), 1))
atom_membership.append([im] * num_atoms[im])
membership = np.array([np.arange(num_atoms[im])] * num_atoms[im])
membership_i = membership.flatten(order='F')
membership_j = membership.flatten()
distance_membership_i.append(membership_i + start)
distance_membership_j.append(membership_j + start)
start = start + num_atoms[im]
atom_number = np.concatenate(atom_number)
distance = np.concatenate(distance, 0)
distance = np.exp(-np.square(distance - self.steps) /
(2 * self.step_size**2))
distance_membership_i = np.concatenate(distance_membership_i)
distance_membership_j = np.concatenate(distance_membership_j)
atom_membership = np.concatenate(atom_membership)
# Generate dicts
dict_DTNN = {
self.atom_number_placeholder: atom_number,
self.distance_placeholder: distance,
self.atom_membership_placeholder: atom_membership,
self.distance_membership_i_placeholder: distance_membership_i,
self.distance_membership_j_placeholder: distance_membership_j
}
return dict_DTNN
class DAGGraphTopology(GraphTopology):
"""GraphTopology for DAG models
"""
def __init__(self, n_atom_feat=75, max_atoms=50, name='topology'):
"""
Parameters
----------
n_atom_feat: int, optional
Number of features per atom.
max_atoms: int, optional
Maximum number of atoms in a molecule; should be defined based on the dataset
"""
self.n_atom_feat = n_atom_feat
self.max_atoms = max_atoms
self.name = name
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_atom_feat),
name=self.name + '_atom_features')
self.parents_placeholder = tf.placeholder(
dtype='int32',
shape=(None, self.max_atoms, self.max_atoms),
# molecule * atom(graph) => step => features
name=self.name + '_parents')
self.calculation_orders_placeholder = tf.placeholder(
dtype='int32',
shape=(None, self.max_atoms),
# molecule * atom(graph) => step
name=self.name + '_orders')
self.calculation_masks_placeholder = tf.placeholder(
dtype='bool',
shape=(None, self.max_atoms),
# molecule * atom(graph) => step
name=self.name + '_masks')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
self.n_atoms_placeholder = tf.placeholder(
dtype='int32', shape=(), name=self.name + '_n_atoms')
# Define the list of tensors to be used as topology
self.topology = [
self.parents_placeholder, self.calculation_orders_placeholder,
self.calculation_masks_placeholder, self.membership_placeholder,
self.n_atoms_placeholder
]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_parents_placeholder(self):
return self.parents_placeholder
def get_calculation_orders_placeholder(self):
return self.calculation_orders_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of mol_graphs into tensorflow feed_dict.
Assigns the graph information in the array of ConvMol objects to the
placeholder tensors for DAG models
params
------
batch : np.ndarray
Array of ConvMol objects
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
atoms_per_mol = [mol.get_num_atoms() for mol in batch]
n_atoms = sum(atoms_per_mol)
start_index = [0] + list(np.cumsum(atoms_per_mol)[:-1])
atoms_all = []
# calculation orders for a batch of molecules
parents_all = []
calculation_orders = []
calculation_masks = []
membership = []
for idm, mol in enumerate(batch):
# padding atom features vector of each molecule with 0
atoms_all.append(mol.get_atom_features())
parents = mol.parents
parents_all.extend(parents)
calculation_index = np.array(parents)[:, :, 0]
mask = np.array(calculation_index - self.max_atoms, dtype=bool)
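# This assumes self.max_atoms is used as the padding index in `parents`:
# padded entries equal max_atoms, so subtracting it maps them to 0 (False)
# while real atom indices stay truthy. The next line then shifts the
# per-molecule indices by start_index[idm] so they address atoms in the
# concatenated batch.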
calculation_orders.append(calculation_index + start_index[idm])
calculation_masks.append(mask)
membership.extend([idm] * atoms_per_mol[idm])
atoms_all = np.concatenate(atoms_all, axis=0)
parents_all = np.stack(parents_all, axis=0)
calculation_orders = np.concatenate(calculation_orders, axis=0)
calculation_masks = np.concatenate(calculation_masks, axis=0)
membership = np.array(membership)
atoms_dict = {
self.atom_features_placeholder: atoms_all,
self.parents_placeholder: parents_all,
self.calculation_orders_placeholder: calculation_orders,
self.calculation_masks_placeholder: calculation_masks,
self.membership_placeholder: membership,
self.n_atoms_placeholder: n_atoms
}
return atoms_dict
class WeaveGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
max_atoms=50,
n_atom_feat=75,
n_pair_feat=14,
name='Weave_topology'):
"""
Parameters
----------
max_atoms: int, optional
maximum number of atoms in a molecule
n_atom_feat: int, optional
number of basic features of each atom
n_pair_feat: int, optional
number of basic features of each pair
"""
#self.n_atoms = n_atoms
self.name = name
self.max_atoms = max_atoms
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.n_atom_feat),
name=self.name + '_atom_features')
self.atom_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms),
name=self.name + '_atom_mask')
self.pair_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms, self.n_pair_feat),
name=self.name + '_pair_features')
self.pair_mask_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.max_atoms, self.max_atoms),
name=self.name + '_pair_mask')
self.membership_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_membership')
# Define the list of tensors to be used as topology
self.topology = [self.atom_mask_placeholder, self.pair_mask_placeholder]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_pair_features_placeholder(self):
return self.pair_features_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of WeaveMol into tensorflow feed_dict.
Assigns the atom features and pair features to the
placeholder tensors
params
------
batch : np.ndarray
Array of WeaveMol
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_feat = []
pair_feat = []
atom_mask = []
pair_mask = []
membership = []
max_atoms = self.max_atoms
for im, mol in enumerate(batch):
n_atoms = mol.get_num_atoms()
atom_feat.append(
np.pad(mol.get_atom_features(), ((0, max_atoms - n_atoms), (0, 0)),
'constant'))
atom_mask.append(
np.array([1] * n_atoms + [0] * (max_atoms - n_atoms), dtype=float))
pair_feat.append(
np.pad(mol.get_pair_features(), ((0, max_atoms - n_atoms), (
0, max_atoms - n_atoms), (0, 0)), 'constant'))
pair_mask.append(np.array([[1]*n_atoms + [0]*(max_atoms-n_atoms)]*n_atoms + \
[[0]*max_atoms]*(max_atoms-n_atoms), dtype=float))
membership.extend([im] * n_atoms)
atom_feat = np.stack(atom_feat)
pair_feat = np.stack(pair_feat)
atom_mask = np.stack(atom_mask)
pair_mask = np.stack(pair_mask)
membership = np.array(membership)
# Generate dicts
dict_DTNN = {
self.atom_features_placeholder: atom_feat,
self.pair_features_placeholder: pair_feat,
self.atom_mask_placeholder: atom_mask,
self.pair_mask_placeholder: pair_mask,
self.membership_placeholder: membership
}
return dict_DTNN
class AlternateWeaveGraphTopology(GraphTopology):
"""Manages placeholders associated with batch of graphs and their topology"""
def __init__(self,
batch_size,
max_atoms=50,
n_atom_feat=75,
n_pair_feat=14,
name='Weave_topology'):
"""
Parameters
----------
batch_size: int
number of molecules in a batch
max_atoms: int, optional
maximum number of atoms in a molecule
n_atom_feat: int, optional
number of basic features of each atom
n_pair_feat: int, optional
number of basic features of each pair
"""
#self.n_atoms = n_atoms
self.name = name
self.batch_size = batch_size
self.max_atoms = max_atoms * batch_size
self.n_atom_feat = n_atom_feat
self.n_pair_feat = n_pair_feat
self.atom_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_atom_feat),
name=self.name + '_atom_features')
self.pair_features_placeholder = tf.placeholder(
dtype='float32',
shape=(None, self.n_pair_feat),
name=self.name + '_pair_features')
self.pair_split_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_pair_split')
self.atom_split_placeholder = tf.placeholder(
dtype='int32', shape=(None,), name=self.name + '_atom_split')
self.atom_to_pair_placeholder = tf.placeholder(
dtype='int32', shape=(None, 2), name=self.name + '_atom_to_pair')
# Define the list of tensors to be used as topology
self.topology = [
self.pair_split_placeholder, self.atom_split_placeholder,
self.atom_to_pair_placeholder
]
self.inputs = [self.atom_features_placeholder]
self.inputs += self.topology
def get_pair_features_placeholder(self):
return self.pair_features_placeholder
def batch_to_feed_dict(self, batch):
"""Converts the current batch of WeaveMol into tensorflow feed_dict.
Assigns the atom features and pair features to the
placeholder tensors
params
------
batch : np.ndarray
Array of WeaveMol
returns
-------
feed_dict : dict
Can be merged with other feed_dicts for input into tensorflow
"""
# Extract atom numbers
atom_feat = []
pair_feat = []
atom_split = []
atom_to_pair = []
pair_split = []
max_atoms = self.max_atoms
start = 0
for im, mol in enumerate(batch):
n_atoms = mol.get_num_atoms()
# number of atoms in each molecule
atom_split.extend([im] * n_atoms)
# index of pair features
C0, C1 = np.meshgrid(np.arange(n_atoms), np.arange(n_atoms))
atom_to_pair.append(
np.transpose(np.array([C1.flatten() + start, C0.flatten() + start])))
# number of pairs for each atom
pair_split.extend(C1.flatten() + start)
start = start + n_atoms
# atom features
atom_feat.append(mol.get_atom_features())
# pair features
pair_feat.append(
np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
self.n_pair_feat)))
atom_feat = np.concatenate(atom_feat, axis=0)
pair_feat = np.concatenate(pair_feat, axis=0)
atom_to_pair = np.concatenate(atom_to_pair, axis=0)
atom_split = np.array(atom_split)
# Generate dicts
dict_DTNN = {
self.atom_features_placeholder: atom_feat,
self.pair_features_placeholder: pair_feat,
self.pair_split_placeholder: pair_split,
self.atom_split_placeholder: atom_split,
self.atom_to_pair_placeholder: atom_to_pair
}
return dict_DTNN
|
|
# Copyright (c) 2010-2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from urllib import unquote
import cStringIO as StringIO
from logging.handlers import SysLogHandler
import mock
from test.unit import FakeLogger
from swift.common.utils import get_logger
from swift.common.middleware import proxy_logging
from swift.common.swob import Request, Response
class FakeApp(object):
def __init__(self, body=['FAKE APP'], response_str='200 OK'):
self.body = body
self.response_str = response_str
def __call__(self, env, start_response):
start_response(self.response_str,
[('Content-Type', 'text/plain'),
('Content-Length', str(sum(map(len, self.body))))])
while env['wsgi.input'].read(5):
pass
return self.body
class FakeAppThatExcepts(object):
def __call__(self, env, start_response):
raise Exception("We take exception to that!")
class FakeAppNoContentLengthNoTransferEncoding(object):
def __init__(self, body=['FAKE APP']):
self.body = body
def __call__(self, env, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
while env['wsgi.input'].read(5):
pass
return self.body
class FileLikeExceptor(object):
def __init__(self):
pass
def read(self, len):
raise IOError('of some sort')
def readline(self, len=1024):
raise IOError('of some sort')
class FakeAppReadline(object):
def __call__(self, env, start_response):
start_response('200 OK', [('Content-Type', 'text/plain'),
('Content-Length', '8')])
env['wsgi.input'].readline()
return ["FAKE APP"]
def start_response(*args):
pass
class TestProxyLogging(unittest.TestCase):
def _log_parts(self, app, should_be_empty=False):
info_calls = app.access_logger.log_dict['info']
if should_be_empty:
self.assertEquals([], info_calls)
else:
self.assertEquals(1, len(info_calls))
return info_calls[0][0][0].split(' ')
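# For reference, the field positions asserted throughout these tests are:
# 0 client ip, 1 remote addr, 2 timestamp, 3 method, 4 path, 5 protocol,
# 6 status, 9 auth token, 10 request bytes, 11 response bytes, 14 headers,
# 15 request duration, 17 swift.log_info, 18/19 start/end timestamps.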
def assertTiming(self, exp_metric, app, exp_timing=None):
timing_calls = app.access_logger.log_dict['timing']
found = False
for timing_call in timing_calls:
self.assertEquals({}, timing_call[1])
self.assertEquals(2, len(timing_call[0]))
if timing_call[0][0] == exp_metric:
found = True
if exp_timing is not None:
self.assertAlmostEqual(exp_timing, timing_call[0][1],
places=4)
if not found:
self.assertTrue(False, 'assertTiming: %s not found in %r' % (
exp_metric, timing_calls))
def assertTimingSince(self, exp_metric, app, exp_start=None):
timing_calls = app.access_logger.log_dict['timing_since']
found = False
for timing_call in timing_calls:
self.assertEquals({}, timing_call[1])
self.assertEquals(2, len(timing_call[0]))
if timing_call[0][0] == exp_metric:
found = True
if exp_start is not None:
self.assertAlmostEqual(exp_start, timing_call[0][1],
places=4)
if not found:
self.assertTrue(False, 'assertTimingSince: %s not found in %r' % (
exp_metric, timing_calls))
def assertNotTiming(self, not_exp_metric, app):
timing_calls = app.access_logger.log_dict['timing']
for timing_call in timing_calls:
self.assertNotEqual(not_exp_metric, timing_call[0][0])
def assertUpdateStats(self, exp_metric, exp_bytes, app):
update_stats_calls = app.access_logger.log_dict['update_stats']
self.assertEquals(1, len(update_stats_calls))
self.assertEquals({}, update_stats_calls[0][1])
self.assertEquals((exp_metric, exp_bytes), update_stats_calls[0][0])
def test_log_request_statsd_invalid_stats_types(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
for url in ['/', '/foo', '/foo/bar', '/v1']:
req = Request.blank(url, environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# get body
''.join(resp)
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_bad(self):
for bad_path in ['', '/', '/bad', '/baddy/mc_badderson', '/v1',
'/v1/']:
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank(bad_path, environ={'REQUEST_METHOD': 'GET'})
now = 10000.0
app.log_request(req, 123, 7, 13, now, now + 2.71828182846)
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([], app.access_logger.log_dict['update_stats'])
def test_log_request_stat_type_good(self):
"""
log_request() should send timing and byte-count counters for GET
requests. Also, __call__()'s iter_response() function should
statsd-log time to first byte (calling the passed-in start_response
function), but only for GET requests.
"""
stub_times = []
def stub_time():
return stub_times.pop(0)
path_types = {
'/v1/a': 'account',
'/v1/a/': 'account',
'/v1/a/c': 'container',
'/v1/a/c/': 'container',
'/v1/a/c/o': 'object',
'/v1/a/c/o/': 'object',
'/v1/a/c/o/p': 'object',
'/v1/a/c/o/p/': 'object',
'/v1/a/c/o/p/p2': 'object',
}
with mock.patch("time.time", stub_time):
for path, exp_type in path_types.iteritems():
# GET
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='7654321', response_str='321 Fubar'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'GET',
'wsgi.input': StringIO.StringIO('4321')})
stub_times = [18.0, 20.71828182846]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('7654321', ''.join(iter_response))
self.assertTiming('%s.GET.321.timing' % exp_type, app,
exp_timing=2.71828182846 * 1000)
self.assertTimingSince(
'%s.GET.321.first-byte.timing' % exp_type, app,
exp_start=18.0)
self.assertUpdateStats('%s.GET.321.xfer' % exp_type,
4 + 7, app)
# GET with swift.proxy_access_log_made already set
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='7654321', response_str='321 Fubar'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'GET',
'swift.proxy_access_log_made': True,
'wsgi.input': StringIO.StringIO('4321')})
stub_times = [18.0, 20.71828182846]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('7654321', ''.join(iter_response))
self.assertEqual([], app.access_logger.log_dict['timing'])
self.assertEqual([],
app.access_logger.log_dict['timing_since'])
self.assertEqual([],
app.access_logger.log_dict['update_stats'])
# PUT (no first-byte timing!)
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(body='87654321', response_str='314 PiTown'), {})
app.access_logger = FakeLogger()
req = Request.blank(path, environ={
'REQUEST_METHOD': 'PUT',
'wsgi.input': StringIO.StringIO('654321')})
# (it's not a GET, so time() doesn't have a 2nd call)
stub_times = [58.2, 58.2 + 7.3321]
iter_response = app(req.environ, lambda *_: None)
self.assertEqual('87654321', ''.join(iter_response))
self.assertTiming('%s.PUT.314.timing' % exp_type, app,
exp_timing=7.3321 * 1000)
self.assertNotTiming(
'%s.GET.314.first-byte.timing' % exp_type, app)
self.assertNotTiming(
'%s.PUT.314.first-byte.timing' % exp_type, app)
self.assertUpdateStats(
'%s.PUT.314.xfer' % exp_type, 6 + 8, app)
def test_log_request_stat_method_filtering_default(self):
method_map = {
'foo': 'BAD_METHOD',
'': 'BAD_METHOD',
'PUTT': 'BAD_METHOD',
'SPECIAL': 'BAD_METHOD',
'GET': 'GET',
'PUT': 'PUT',
'COPY': 'COPY',
'HEAD': 'HEAD',
'POST': 'POST',
'DELETE': 'DELETE',
'OPTIONS': 'OPTIONS',
}
for method, exp_method in method_map.iteritems():
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/v1/a/', environ={'REQUEST_METHOD': method})
now = 10000.0
app.log_request(req, 299, 11, 3, now, now + 1.17)
self.assertTiming('account.%s.299.timing' % exp_method, app,
exp_timing=1.17 * 1000)
self.assertUpdateStats('account.%s.299.xfer' % exp_method,
11 + 3, app)
def test_log_request_stat_method_filtering_custom(self):
method_map = {
'foo': 'BAD_METHOD',
'': 'BAD_METHOD',
'PUTT': 'BAD_METHOD',
'SPECIAL': 'SPECIAL', # will be configured
'GET': 'GET',
'PUT': 'PUT',
'COPY': 'BAD_METHOD', # prove no one's special
}
# this conf var supports optional leading access_
for conf_key in ['access_log_statsd_valid_http_methods',
'log_statsd_valid_http_methods']:
for method, exp_method in method_map.iteritems():
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
conf_key: 'SPECIAL, GET,PUT ', # crazy spaces ok
})
app.access_logger = FakeLogger()
req = Request.blank('/v1/a/c',
environ={'REQUEST_METHOD': method})
now = 10000.0
app.log_request(req, 911, 4, 43, now, now + 1.01)
self.assertTiming('container.%s.911.timing' % exp_method, app,
exp_timing=1.01 * 1000)
self.assertUpdateStats('container.%s.911.xfer' % exp_method,
4 + 43, app)
def test_basic_req(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_basic_req_second_time(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'swift.proxy_access_log_made': True,
'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
self._log_parts(app, should_be_empty=True)
self.assertEquals(resp_body, 'FAKE APP')
def test_multi_segment_resp(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(
['some', 'chunks', 'of data']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'swift.source': 'SOS'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'somechunksof data')
self.assertEquals(log_parts[11], str(len(resp_body)))
self.assertUpdateStats('SOS.GET.200.xfer', len(resp_body), app)
def test_log_headers(self):
for conf_key in ['access_log_headers', 'log_headers']:
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
{conf_key: 'yes'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
headers = unquote(log_parts[14]).split('\n')
self.assert_('Host: localhost:80' in headers)
def test_access_log_headers_only(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(), {'log_headers': 'yes',
'access_log_headers_only': 'FIRST, seCond'})
app.access_logger = FakeLogger()
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET'},
headers={'First': '1',
'Second': '2',
'Third': '3'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
headers = unquote(log_parts[14]).split('\n')
self.assert_('First: 1' in headers)
self.assert_('Second: 2' in headers)
self.assert_('Third: 3' not in headers)
self.assert_('Host: localhost:80' not in headers)
def test_upload_size(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(),
{'log_headers': 'yes'})
app.access_logger = FakeLogger()
req = Request.blank(
'/v1/a/c/o/foo',
environ={'REQUEST_METHOD': 'PUT',
'wsgi.input': StringIO.StringIO('some stuff')})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[11], str(len('FAKE APP')))
self.assertEquals(log_parts[10], str(len('some stuff')))
self.assertUpdateStats('object.PUT.200.xfer',
len('some stuff') + len('FAKE APP'),
app)
def test_upload_line(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(),
{'log_headers': 'yes'})
app.access_logger = FakeLogger()
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'POST',
'wsgi.input': StringIO.StringIO(
'some stuff\nsome other stuff\n')})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[11], str(len('FAKE APP')))
self.assertEquals(log_parts[10], str(len('some stuff\n')))
self.assertUpdateStats('container.POST.200.xfer',
len('some stuff\n') + len('FAKE APP'),
app)
def test_log_query_string(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'QUERY_STRING': 'x=3'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(unquote(log_parts[4]), '/?x=3')
def test_client_logging(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '1.2.3.4') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
def test_proxy_client_logging(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_FORWARDED_FOR': '4.5.6.7,8.9.10.11'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '4.5.6.7') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={
'REQUEST_METHOD': 'GET',
'REMOTE_ADDR': '1.2.3.4',
'HTTP_X_CLUSTER_CLIENT_IP': '4.5.6.7'})
resp = app(req.environ, start_response)
# exhaust generator
[x for x in resp]
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], '4.5.6.7') # client ip
self.assertEquals(log_parts[1], '1.2.3.4') # remote addr
def test_facility(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(),
{'log_headers': 'yes',
'access_log_facility': 'LOG_LOCAL7'})
handler = get_logger.handler4logger[app.access_logger.logger]
self.assertEquals(SysLogHandler.LOG_LOCAL7, handler.facility)
def test_filter(self):
factory = proxy_logging.filter_factory({})
self.assert_(callable(factory))
self.assert_(callable(factory(FakeApp())))
def test_unread_body(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(['some', 'stuff']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
# read first chunk
next(resp)
resp.close() # raise a GeneratorExit in middleware app_iter loop
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[11], '4') # write length
def test_disconnect_on_readline(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'wsgi.input': FileLikeExceptor()})
try:
resp = app(req.environ, start_response)
# read body
''.join(resp)
except IOError:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[10], '-') # read length
def test_disconnect_on_read(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeApp(['some', 'stuff']), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'wsgi.input': FileLikeExceptor()})
try:
resp = app(req.environ, start_response)
# read body
''.join(resp)
except IOError:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '499')
self.assertEquals(log_parts[10], '-') # read length
def test_app_exception(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppThatExcepts(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
try:
app(req.environ, start_response)
except Exception:
pass
log_parts = self._log_parts(app)
self.assertEquals(log_parts[6], '500')
self.assertEquals(log_parts[10], '-') # read length
def test_no_content_length_no_transfer_encoding_with_list_body(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
# test the "while not chunk: chunk = iterator.next()"
body=['', '', 'line1\n', 'line2\n'],
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'line1\nline2\n')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_no_content_length_no_transfer_encoding_with_empty_strings(self):
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
# test the "while not chunk: chunk = iterator.next()"
body=['', '', ''],
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, '')
self.assertEquals(log_parts[11], '-')
def test_no_content_length_no_transfer_encoding_with_generator(self):
class BodyGen(object):
def __init__(self, data):
self.data = data
def __iter__(self):
yield self.data
app = proxy_logging.ProxyLoggingMiddleware(
FakeAppNoContentLengthNoTransferEncoding(
body=BodyGen('abc'),
), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'abc')
self.assertEquals(log_parts[11], '3')
def test_req_path_info_popping(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/v1/something', environ={'REQUEST_METHOD': 'GET'})
req.path_info_pop()
self.assertEquals(req.environ['PATH_INFO'], '/something')
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/v1/something')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_ipv6(self):
ipv6addr = '2001:db8:85a3:8d3:1319:8a2e:370:7348'
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.remote_addr = ipv6addr
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[0], ipv6addr)
self.assertEquals(log_parts[1], ipv6addr)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_log_info_none(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = []
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], '-')
def test_log_info_single(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one')
def test_log_info_multiple(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
req.environ['swift.log_info'] = ['one', 'and two']
list(app(req.environ, start_response))
log_parts = self._log_parts(app)
self.assertEquals(log_parts[17], 'one%2Cand%20two')
def test_log_auth_token(self):
auth_token = 'b05bf940-0464-4c0e-8c70-87717d2d73e8'
# Default - no reveal_sensitive_prefix in config
# No x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
# Has x-auth-token header
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# Truncate to first 8 characters
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '8'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], 'b05bf940...')
# Token length and reveal_sensitive_prefix are same (no truncate)
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': str(len(auth_token))})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], auth_token)
# Don't log x-auth-token
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '-')
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {
'reveal_sensitive_prefix': '0'})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_AUTH_TOKEN': auth_token})
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(log_parts[9], '...')
# Avoids pyflakes error, "local variable 'resp_body' is assigned to
# but never used"
self.assertTrue(resp_body is not None)
def test_ensure_fields(self):
app = proxy_logging.ProxyLoggingMiddleware(FakeApp(), {})
app.access_logger = FakeLogger()
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
with mock.patch('time.time',
mock.MagicMock(
side_effect=[10000000.0, 10000001.0])):
resp = app(req.environ, start_response)
resp_body = ''.join(resp)
log_parts = self._log_parts(app)
self.assertEquals(len(log_parts), 20)
self.assertEquals(log_parts[0], '-')
self.assertEquals(log_parts[1], '-')
self.assertEquals(log_parts[2], '26/Apr/1970/17/46/41')
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(log_parts[7], '-')
self.assertEquals(log_parts[8], '-')
self.assertEquals(log_parts[9], '-')
self.assertEquals(log_parts[10], '-')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
self.assertEquals(log_parts[12], '-')
self.assertEquals(log_parts[13], '-')
self.assertEquals(log_parts[14], '-')
self.assertEquals(log_parts[15], '1.0000')
self.assertEquals(log_parts[16], '-')
self.assertEquals(log_parts[17], '-')
self.assertEquals(log_parts[18], '10000000.000000000')
self.assertEquals(log_parts[19], '10000001.000000000')
def test_dual_logging_middlewares(self):
# Since no internal request is being made, outer most proxy logging
# middleware, log1, should have performed the logging.
app = FakeApp()
flg0 = FakeLogger()
env = {}
log0 = proxy_logging.ProxyLoggingMiddleware(app, env, logger=flg0)
flg1 = FakeLogger()
log1 = proxy_logging.ProxyLoggingMiddleware(log0, env, logger=flg1)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = log1(req.environ, start_response)
resp_body = ''.join(resp)
self._log_parts(log0, should_be_empty=True)
log_parts = self._log_parts(log1)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE APP')
self.assertEquals(log_parts[11], str(len(resp_body)))
def test_dual_logging_middlewares_w_inner(self):
class FakeMiddleware(object):
"""
Fake middleware to make a separate internal request, but construct
the response with different data.
"""
def __init__(self, app, conf):
self.app = app
self.conf = conf
def GET(self, req):
# Make the internal request
ireq = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = self.app(ireq.environ, start_response)
resp_body = ''.join(resp)
if resp_body != 'FAKE APP':
return Response(request=req,
body="FAKE APP WAS NOT RETURNED",
content_type="text/plain")
# But our response is different
return Response(request=req, body="FAKE MIDDLEWARE",
content_type="text/plain")
def __call__(self, env, start_response):
req = Request(env)
return self.GET(req)(env, start_response)
# Since an internal request is being made, inner most proxy logging
# middleware, log0, should have performed the logging.
app = FakeApp()
flg0 = FakeLogger()
env = {}
log0 = proxy_logging.ProxyLoggingMiddleware(app, env, logger=flg0)
fake = FakeMiddleware(log0, env)
flg1 = FakeLogger()
log1 = proxy_logging.ProxyLoggingMiddleware(fake, env, logger=flg1)
req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
resp = log1(req.environ, start_response)
resp_body = ''.join(resp)
# Inner most logger should have logged the app's response
log_parts = self._log_parts(log0)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(log_parts[11], str(len('FAKE APP')))
# Outer most logger should have logged the other middleware's response
log_parts = self._log_parts(log1)
self.assertEquals(log_parts[3], 'GET')
self.assertEquals(log_parts[4], '/')
self.assertEquals(log_parts[5], 'HTTP/1.0')
self.assertEquals(log_parts[6], '200')
self.assertEquals(resp_body, 'FAKE MIDDLEWARE')
self.assertEquals(log_parts[11], str(len(resp_body)))
if __name__ == '__main__':
unittest.main()
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.api.definitions import dns as dns_apidef
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib import context
from oslo_utils import uuidutils
from neutron.objects import ports as port_obj
from neutron.plugins.ml2.extensions import dns_domain_keywords
from neutron.tests.unit.plugins.ml2.extensions import test_dns_integration
PROJECT_ID = uuidutils.generate_uuid()
class DNSDomainKeywordsTestCase(
test_dns_integration.DNSIntegrationTestCase):
_extension_drivers = ['dns_domain_keywords']
_expected_dns_domain = "%s.%s" % (PROJECT_ID,
test_dns_integration.DNSDOMAIN)
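# Illustrative note derived from the tests below: the dns_domain_keywords
# driver substitutes the <project_id>, <project_name>, <user_id> and
# <user_name> keywords in a network's dns_domain (see test__parse_dns_domain),
# so the "<project_id>." prefixed domain used by _create_port_for_test
# resolves to the PROJECT_ID-prefixed value in _expected_dns_domain.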
def _create_port_for_test(self, provider_net=True, dns_domain=True,
dns_name=True, ipv4=True, ipv6=True,
dns_domain_port=False):
net_kwargs = {}
if provider_net:
net_kwargs = {
'arg_list': (pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID,),
pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '2016',
}
if dns_domain:
net_kwargs[dns_apidef.DNSDOMAIN] = (
"<project_id>.%s" % test_dns_integration.DNSDOMAIN)
net_kwargs['arg_list'] = \
net_kwargs.get('arg_list', ()) + (dns_apidef.DNSDOMAIN,)
net_kwargs['shared'] = True
res = self._create_network(self.fmt, 'test_network', True,
**net_kwargs)
network = self.deserialize(self.fmt, res)
if ipv4:
cidr = '10.0.0.0/24'
self._create_subnet_for_test(network['network']['id'], cidr)
if ipv6:
cidr = 'fd3d:bdd4:da60::/64'
self._create_subnet_for_test(network['network']['id'], cidr)
port_kwargs = {}
if dns_name:
port_kwargs = {
'arg_list': (dns_apidef.DNSNAME,),
dns_apidef.DNSNAME: test_dns_integration.DNSNAME
}
if dns_domain_port:
port_kwargs[dns_apidef.DNSDOMAIN] = (
test_dns_integration.PORTDNSDOMAIN)
port_kwargs['arg_list'] = (port_kwargs.get('arg_list', ()) +
(dns_apidef.DNSDOMAIN,))
res = self._create_port('json', network['network']['id'],
set_context=True, tenant_id=PROJECT_ID,
**port_kwargs)
self.assertEqual(201, res.status_int)
port = self.deserialize(self.fmt, res)['port']
ctx = context.get_admin_context()
dns_data_db = port_obj.PortDNS.get_object(ctx, port_id=port['id'])
return port, dns_data_db
def _update_port_for_test(self, port,
new_dns_name=test_dns_integration.NEWDNSNAME,
new_dns_domain=None, **kwargs):
test_dns_integration.mock_client.reset_mock()
ip_addresses = [netaddr.IPAddress(ip['ip_address'])
for ip in port['fixed_ips']]
records_v4 = [ip for ip in ip_addresses if ip.version == 4]
records_v6 = [ip for ip in ip_addresses if ip.version == 6]
recordsets = []
if records_v4:
recordsets.append({'id': test_dns_integration.V4UUID,
'records': records_v4})
if records_v6:
recordsets.append({'id': test_dns_integration.V6UUID,
'records': records_v6})
test_dns_integration.mock_client.recordsets.list.return_value = (
recordsets)
test_dns_integration.mock_admin_client.reset_mock()
body = {}
if new_dns_name is not None:
body['dns_name'] = new_dns_name
if new_dns_domain is not None:
body[dns_apidef.DNSDOMAIN] = new_dns_domain
body.update(kwargs)
data = {'port': body}
# NOTE(slaweq): Admin context is required here to be able to update
# fixed_ips of the port as by default it is not possible for non-admin
# users
ctx = context.Context(project_id=PROJECT_ID, is_admin=True)
req = self.new_update_request('ports', data, port['id'], context=ctx)
res = req.get_response(self.api)
self.assertEqual(200, res.status_int)
port = self.deserialize(self.fmt, res)['port']
admin_ctx = context.get_admin_context()
dns_data_db = port_obj.PortDNS.get_object(admin_ctx,
port_id=port['id'])
return port, dns_data_db
def _verify_port_dns(self, port, dns_data_db, dns_name=True,
dns_domain=True, ptr_zones=True, delete_records=False,
provider_net=True, dns_driver=True, original_ips=None,
current_dns_name=test_dns_integration.DNSNAME,
previous_dns_name='', dns_domain_port=False,
current_dns_domain=None, previous_dns_domain=None):
current_dns_domain = current_dns_domain or self._expected_dns_domain
previous_dns_domain = previous_dns_domain or self._expected_dns_domain
        super(DNSDomainKeywordsTestCase, self)._verify_port_dns(
port=port, dns_data_db=dns_data_db, dns_name=dns_name,
dns_domain=dns_domain, ptr_zones=ptr_zones,
delete_records=delete_records, provider_net=provider_net,
dns_driver=dns_driver, original_ips=original_ips,
current_dns_name=current_dns_name,
previous_dns_name=previous_dns_name,
dns_domain_port=dns_domain_port,
current_dns_domain=current_dns_domain,
previous_dns_domain=previous_dns_domain)
def test__parse_dns_domain(self, *mocks):
ctx = context.Context(
project_id=uuidutils.generate_uuid(),
project_name="project",
user_id=uuidutils.generate_uuid(),
user_name="user"
)
domains = [
("<project_id>.<project_name>.<user_id>.<user_name>.domain",
"%s.%s.%s.%s.domain" % (ctx.project_id, ctx.project_name,
ctx.user_id, ctx.user_name)),
("<project_id>.domain",
"%s.domain" % ctx.project_id),
("<project_name>.domain",
"%s.domain" % ctx.project_name),
("<user_id>.domain",
"%s.domain" % ctx.user_id),
("<user_name>.domain",
"%s.domain" % ctx.user_name)]
for domain, expected_domain in domains:
self.assertEqual(
expected_domain,
dns_domain_keywords.DnsDomainKeywordsExtensionDriver.
_parse_dns_domain(ctx, domain))
def test__parse_dns_domain_missing_fields_in_context(self, *mocks):
domain = "<project_id>.<project_name>.<user_id>.<user_name>.domain"
ctx = context.Context(
project_id=uuidutils.generate_uuid(),
project_name=None,
user_id=uuidutils.generate_uuid(),
user_name="user"
)
expected_domain = "%s.<project_name>.%s.%s.domain" % (
ctx.project_id, ctx.user_id, ctx.user_name)
self.assertEqual(
expected_domain,
dns_domain_keywords.DnsDomainKeywordsExtensionDriver.
_parse_dns_domain(ctx, domain))
def test_update_port_with_current_dns_name(self, *mocks):
port, dns_data_db = self._create_port_for_test()
port, dns_data_db = self._update_port_for_test(
port, new_dns_name=test_dns_integration.DNSNAME)
self.assertEqual(test_dns_integration.DNSNAME,
dns_data_db['current_dns_name'])
self.assertEqual(self._expected_dns_domain,
dns_data_db['current_dns_domain'])
self.assertEqual('', dns_data_db['previous_dns_name'])
self.assertEqual('', dns_data_db['previous_dns_domain'])
self.assertFalse(
test_dns_integration.mock_client.recordsets.create.call_args_list)
self.assertFalse(
test_dns_integration.mock_admin_client.recordsets.
create.call_args_list)
self.assertFalse(
test_dns_integration.mock_client.recordsets.delete.call_args_list)
self.assertFalse(
test_dns_integration.mock_admin_client.recordsets.
delete.call_args_list)
def test_update_port_non_dns_name_attribute(self, *mocks):
port, dns_data_db = self._create_port_for_test()
port_name = 'port_name'
kwargs = {'name': port_name}
port, dns_data_db = self._update_port_for_test(port,
new_dns_name=None,
**kwargs)
self.assertEqual(test_dns_integration.DNSNAME,
dns_data_db['current_dns_name'])
self.assertEqual(self._expected_dns_domain,
dns_data_db['current_dns_domain'])
self.assertEqual('', dns_data_db['previous_dns_name'])
self.assertEqual('', dns_data_db['previous_dns_domain'])
self.assertFalse(
test_dns_integration.mock_client.recordsets.create.call_args_list)
self.assertFalse(
test_dns_integration.mock_admin_client.recordsets.
create.call_args_list)
self.assertFalse(
test_dns_integration.mock_client.recordsets.delete.call_args_list)
self.assertFalse(
test_dns_integration.mock_admin_client.recordsets.
delete.call_args_list)
self.assertEqual(port_name, port['name'])
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
connection = db._get_connection()
cursor = connection.cursor()
        # Probe for the column first and only add it when the select fails,
        # i.e. when 'raw_field_order' does not exist yet.
        try:
cursor.execute('select raw_field_order from %s' % db.quote_name('preferences_registrationpreferences'))
connection.close()
except:
connection.close()
# Adding field 'RegistrationPreferences.raw_field_order'
db.add_column('preferences_registrationpreferences', 'raw_field_order',
self.gf('django.db.models.fields.CharField')(default='{}', max_length=1024, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'RegistrationPreferences.raw_field_order'
db.delete_column('preferences_registrationpreferences', 'raw_field_order')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.blogpost': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BlogPost', '_ormbases': ['jmbo.ModelBase']},
'content': ('ckeditor.fields.RichTextField', [], {}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.chatroom': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ChatRoom', '_ormbases': ['jmbo.ModelBase']},
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'designation': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.commentreport': {
'Meta': {'object_name': 'CommentReport'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reporter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'})
},
'foundry.link': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'link_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_title_tiled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_syndication': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pinned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'listing_pinned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'view_modifier': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Country']", 'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'receive_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receive_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'foundry.menu': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'ordering': "('title', 'subtitle')", 'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.notification': {
'Meta': {'object_name': 'Notification'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Member']"})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32'}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.pageview': {
'Meta': {'object_name': 'PageView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'has_left_or_right_column': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photo': {
'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'},
'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'title_slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
|
|
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder Driver for Unity"""
from oslo_config import cfg
from oslo_log import log as logging
from cinder import interface
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.dell_emc.unity import adapter
from cinder.volume.drivers.san.san import san_opts
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
UNITY_OPTS = [
cfg.ListOpt('unity_storage_pool_names',
default=[],
help='A comma-separated list of storage pool names to be '
'used.'),
cfg.ListOpt('unity_io_ports',
default=[],
help='A comma-separated list of iSCSI or FC ports to be used. '
                     'Each port can be a Unix-style glob expression.'),
cfg.BoolOpt('remove_empty_host',
default=False,
                help='Whether to remove the host from Unity when the last LUN '
                     'is detached from it. Defaults to False.')]
CONF.register_opts(UNITY_OPTS, group=configuration.SHARED_CONF_GROUP)
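# Illustrative backend configuration for the options registered above. This is
# only a sketch: the section name and the pool/port values are hypothetical and
# not taken from this file.
#
#   [unity_backend]
#   unity_storage_pool_names = pool_1,pool_2
#   unity_io_ports = spa_eth*,spb_eth*
#   remove_empty_host = True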
@interface.volumedriver
class UnityDriver(driver.ManageableVD,
driver.ManageableSnapshotsVD,
driver.BaseVD):
"""Unity Driver.
Version history:
.. code-block:: none
1.0.0 - Initial version
2.0.0 - Add thin clone support
3.0.0 - Add IPv6 support
3.1.0 - Support revert to snapshot API
        3.1.1 - Enable SSL support
3.1.2 - Fixes bug 1759175 to detach the lun correctly when auto zone
was enabled and the lun was the last one attached to the host.
3.1.3 - Support remove empty host
        3.1.4 - Fixes bug 1775518 to make sure the driver initializes
                successfully even when unity_io_ports and
                unity_storage_pool_names are empty
3.1.5 - Cherry-pick the fix for 1773305 to return the targets which
connect to the logged-out initiators
"""
VERSION = '03.01.05'
VENDOR = 'Dell EMC'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "EMC_UNITY_CI"
def __init__(self, *args, **kwargs):
super(UnityDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(UNITY_OPTS)
self.configuration.append_config_values(san_opts)
protocol = self.configuration.storage_protocol
if protocol.lower() == adapter.PROTOCOL_FC.lower():
self.protocol = adapter.PROTOCOL_FC
self.adapter = adapter.FCAdapter(self.VERSION)
else:
self.protocol = adapter.PROTOCOL_ISCSI
self.adapter = adapter.ISCSIAdapter(self.VERSION)
def do_setup(self, context):
self.adapter.do_setup(self, self.configuration)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Creates a volume."""
return self.adapter.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
return self.adapter.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
return self.adapter.create_cloned_volume(volume, src_vref)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
self.adapter.extend_volume(volume, new_size)
def delete_volume(self, volume):
"""Deletes a volume."""
self.adapter.delete_volume(volume)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.adapter.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
self.adapter.delete_snapshot(snapshot)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
@zm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
The initiator_target_map is a map that represents the remote wwn(s)
and a list of wwns which are visible to the remote wwn(s).
Example return values:
FC:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'initiator_target_map': {
'1122334455667788': ['1234567890123',
'0987654321321']
}
}
}
iSCSI:
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqns': ['iqn.2010-10.org.openstack:volume-00001',
'iqn.2010-10.org.openstack:volume-00002'],
'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'],
'target_luns': [1, 1],
}
}
"""
return self.adapter.initialize_connection(volume, connector)
@zm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
return self.adapter.terminate_connection(volume, connector)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
:param refresh: True to get updated data
"""
if refresh:
self.update_volume_stats()
return self._stats
def update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats.")
stats = self.adapter.update_volume_stats()
stats['driver_version'] = self.VERSION
stats['vendor_name'] = self.VENDOR
self._stats = stats
def manage_existing(self, volume, existing_ref):
"""Manages an existing LUN in the array.
:param volume: the mapping cinder volume of the Unity LUN.
:param existing_ref: the Unity LUN info.
"""
return self.adapter.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
"""Returns size of volume to be managed by manage_existing."""
return self.adapter.manage_existing_get_size(volume, existing_ref)
def get_pool(self, volume):
"""Returns the pool name of a volume."""
return self.adapter.get_pool_name(volume)
def unmanage(self, volume):
"""Unmanages a volume."""
pass
def backup_use_temp_snapshot(self):
return True
def create_export_snapshot(self, context, snapshot, connector):
"""Creates the mount point of the snapshot for backup.
Not necessary to create on Unity.
"""
pass
def remove_export_snapshot(self, context, snapshot):
"""Deletes the mount point the snapshot for backup.
Not necessary to create on Unity.
"""
pass
def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
return self.adapter.initialize_connection_snapshot(snapshot, connector)
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
return self.adapter.terminate_connection_snapshot(snapshot, connector)
def revert_to_snapshot(self, context, volume, snapshot):
"""Reverts a volume to a snapshot."""
return self.adapter.restore_snapshot(volume, snapshot)
|
|
"""
General utility functions.
"""
import json
from pathlib import Path
from typing import Callable
import numpy as np
import talib
from .object import BarData, TickData
from .constant import Exchange, Interval
def extract_vt_symbol(vt_symbol: str):
"""
:return: (symbol, exchange)
"""
symbol, exchange_str = vt_symbol.split(".")
return symbol, Exchange(exchange_str)
def generate_vt_symbol(symbol: str, exchange: Exchange):
return f"{symbol}.{exchange.value}"
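# Illustrative round trip (a sketch; assumes "SHFE" is a valid Exchange value):
#   extract_vt_symbol("rb2010.SHFE")            -> ("rb2010", Exchange.SHFE)
#   generate_vt_symbol("rb2010", Exchange.SHFE) -> "rb2010.SHFE"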
def _get_trader_dir(temp_name: str):
"""
Get path where trader is running in.
"""
cwd = Path.cwd()
temp_path = cwd.joinpath(temp_name)
# If .vntrader folder exists in current working directory,
# then use it as trader running path.
if temp_path.exists():
return cwd, temp_path
# Otherwise use home path of system.
home_path = Path.home()
temp_path = home_path.joinpath(temp_name)
# Create .vntrader folder under home path if not exist.
if not temp_path.exists():
temp_path.mkdir()
return home_path, temp_path
TRADER_DIR, TEMP_DIR = _get_trader_dir(".vntrader")
def get_file_path(filename: str):
"""
Get path for temp file with filename.
"""
return TEMP_DIR.joinpath(filename)
def get_folder_path(folder_name: str):
"""
Get path for temp folder with folder name.
"""
folder_path = TEMP_DIR.joinpath(folder_name)
if not folder_path.exists():
folder_path.mkdir()
return folder_path
def get_icon_path(filepath: str, ico_name: str):
"""
Get path for icon file with ico name.
"""
ui_path = Path(filepath).parent
icon_path = ui_path.joinpath("ico", ico_name)
return str(icon_path)
def load_json(filename: str):
"""
Load data from json file in temp path.
"""
filepath = get_file_path(filename)
if filepath.exists():
with open(filepath, mode="r", encoding="UTF-8") as f:
data = json.load(f)
return data
else:
save_json(filename, {})
return {}
def save_json(filename: str, data: dict):
"""
Save data into json file in temp path.
"""
filepath = get_file_path(filename)
with open(filepath, mode="w+", encoding="UTF-8") as f:
json.dump(
data,
f,
indent=4,
ensure_ascii=False
)
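# Illustrative round trip (file name is arbitrary; the file is created under
# TEMP_DIR, i.e. the .vntrader folder resolved above):
#   save_json("demo_setting.json", {"symbol": "rb2010.SHFE"})
#   load_json("demo_setting.json")   # -> {"symbol": "rb2010.SHFE"}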
def round_to(value: float, target: float):
"""
Round price to price tick value.
"""
rounded = int(round(value / target)) * target
return rounded
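# Illustrative behaviour (values chosen so the float arithmetic is exact):
#   round_to(10.3, 0.25) -> 10.25
#   round_to(10.4, 0.25) -> 10.5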
class BarGenerator:
"""
For:
1. generating 1 minute bar data from tick data
    2. generating x-minute bar / x-hour bar data from 1-minute data
Notice:
    1. for an x-minute bar, x must evenly divide 60: 2, 3, 5, 6, 10, 15, 20, 30
    2. for an x-hour bar, x can be any number
"""
def __init__(
self,
on_bar: Callable,
window: int = 0,
on_window_bar: Callable = None,
interval: Interval = Interval.MINUTE
):
"""Constructor"""
self.bar = None
self.on_bar = on_bar
self.interval = interval
self.interval_count = 0
self.window = window
self.window_bar = None
self.on_window_bar = on_window_bar
self.last_tick = None
self.last_bar = None
def update_tick(self, tick: TickData):
"""
Update new tick data into generator.
"""
new_minute = False
# Filter tick data with 0 last price
if not tick.last_price:
return
if not self.bar:
new_minute = True
elif self.bar.datetime.minute != tick.datetime.minute:
self.bar.datetime = self.bar.datetime.replace(
second=0, microsecond=0
)
self.on_bar(self.bar)
new_minute = True
if new_minute:
self.bar = BarData(
symbol=tick.symbol,
exchange=tick.exchange,
interval=Interval.MINUTE,
datetime=tick.datetime,
gateway_name=tick.gateway_name,
open_price=tick.last_price,
high_price=tick.last_price,
low_price=tick.last_price,
close_price=tick.last_price,
open_interest=tick.open_interest
)
else:
self.bar.high_price = max(self.bar.high_price, tick.last_price)
self.bar.low_price = min(self.bar.low_price, tick.last_price)
self.bar.close_price = tick.last_price
self.bar.open_interest = tick.open_interest
self.bar.datetime = tick.datetime
if self.last_tick:
volume_change = tick.volume - self.last_tick.volume
self.bar.volume += max(volume_change, 0)
self.last_tick = tick
def update_bar(self, bar: BarData):
"""
Update 1 minute bar into generator
"""
        # If not inited, create window bar object
if not self.window_bar:
# Generate timestamp for bar data
if self.interval == Interval.MINUTE:
dt = bar.datetime.replace(second=0, microsecond=0)
else:
dt = bar.datetime.replace(minute=0, second=0, microsecond=0)
self.window_bar = BarData(
symbol=bar.symbol,
exchange=bar.exchange,
datetime=dt,
gateway_name=bar.gateway_name,
open_price=bar.open_price,
high_price=bar.high_price,
low_price=bar.low_price
)
# Otherwise, update high/low price into window bar
else:
self.window_bar.high_price = max(
self.window_bar.high_price, bar.high_price)
self.window_bar.low_price = min(
self.window_bar.low_price, bar.low_price)
# Update close price/volume into window bar
self.window_bar.close_price = bar.close_price
self.window_bar.volume += int(bar.volume)
self.window_bar.open_interest = bar.open_interest
# Check if window bar completed
finished = False
if self.interval == Interval.MINUTE:
# x-minute bar
if not (bar.datetime.minute + 1) % self.window:
finished = True
elif self.interval == Interval.HOUR:
if self.last_bar and bar.datetime.hour != self.last_bar.datetime.hour:
# 1-hour bar
if self.window == 1:
finished = True
# x-hour bar
else:
self.interval_count += 1
if not self.interval_count % self.window:
finished = True
self.interval_count = 0
if finished:
self.on_window_bar(self.window_bar)
self.window_bar = None
# Cache last bar object
self.last_bar = bar
def generate(self):
"""
        Generate the bar data and call the callback immediately.
"""
self.bar.datetime = self.bar.datetime.replace(
second=0, microsecond=0
)
self.on_bar(self.bar)
self.bar = None
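# Illustrative wiring sketch for BarGenerator (not part of the original module;
# the callback names and the 15-minute window are arbitrary choices):
#
#   def on_bar(bar: BarData):
#       # 1-minute bars arrive here; forward them for window aggregation.
#       bg.update_bar(bar)
#
#   def on_15min_bar(bar: BarData):
#       print("15-minute bar closed:", bar.datetime, bar.close_price)
#
#   bg = BarGenerator(on_bar, window=15, on_window_bar=on_15min_bar)
#   # Feed raw ticks from a gateway as they arrive:
#   # bg.update_tick(tick)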
class ArrayManager(object):
"""
For:
1. time series container of bar data
    2. calculating technical indicator values
"""
def __init__(self, size=100):
"""Constructor"""
self.count = 0
self.size = size
self.inited = False
self.open_array = np.zeros(size)
self.high_array = np.zeros(size)
self.low_array = np.zeros(size)
self.close_array = np.zeros(size)
self.volume_array = np.zeros(size)
def update_bar(self, bar):
"""
Update new bar data into array manager.
"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.open_array[:-1] = self.open_array[1:]
self.high_array[:-1] = self.high_array[1:]
self.low_array[:-1] = self.low_array[1:]
self.close_array[:-1] = self.close_array[1:]
self.volume_array[:-1] = self.volume_array[1:]
self.open_array[-1] = bar.open_price
self.high_array[-1] = bar.high_price
self.low_array[-1] = bar.low_price
self.close_array[-1] = bar.close_price
self.volume_array[-1] = bar.volume
@property
def open(self):
"""
Get open price time series.
"""
return self.open_array
@property
def high(self):
"""
Get high price time series.
"""
return self.high_array
@property
def low(self):
"""
Get low price time series.
"""
return self.low_array
@property
def close(self):
"""
Get close price time series.
"""
return self.close_array
@property
def volume(self):
"""
Get trading volume time series.
"""
return self.volume_array
def sma(self, n, array=False):
"""
Simple moving average.
"""
result = talib.SMA(self.close, n)
if array:
return result
return result[-1]
def std(self, n, array=False):
"""
Standard deviation
"""
result = talib.STDDEV(self.close, n)
if array:
return result
return result[-1]
def cci(self, n, array=False):
"""
Commodity Channel Index (CCI).
"""
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def atr(self, n, array=False):
"""
Average True Range (ATR).
"""
result = talib.ATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def rsi(self, n, array=False):
"""
        Relative Strength Index (RSI).
"""
result = talib.RSI(self.close, n)
if array:
return result
return result[-1]
def macd(self, fast_period, slow_period, signal_period, array=False):
"""
MACD.
"""
macd, signal, hist = talib.MACD(
self.close, fast_period, slow_period, signal_period
)
if array:
return macd, signal, hist
return macd[-1], signal[-1], hist[-1]
def adx(self, n, array=False):
"""
ADX.
"""
result = talib.ADX(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def boll(self, n, dev, array=False):
"""
Bollinger Channel.
"""
mid = self.sma(n, array)
std = self.std(n, array)
up = mid + std * dev
down = mid - std * dev
return up, down
def keltner(self, n, dev, array=False):
"""
Keltner Channel.
"""
mid = self.sma(n, array)
atr = self.atr(n, array)
up = mid + atr * dev
down = mid - atr * dev
return up, down
def donchian(self, n, array=False):
"""
Donchian Channel.
"""
up = talib.MAX(self.high, n)
down = talib.MIN(self.low, n)
if array:
return up, down
return up[-1], down[-1]
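# Illustrative usage sketch for ArrayManager (not part of the original module;
# typically driven from a bar callback such as BarGenerator's on_bar):
#
#   am = ArrayManager(size=100)
#
#   def on_bar(bar: BarData):
#       am.update_bar(bar)
#       if not am.inited:              # wait until the ring buffer is full
#           return
#       atr_value = am.atr(14)         # latest value; pass array=True for the series
#       upper, lower = am.boll(20, 2)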
def virtual(func: "callable"):
"""
    Mark a function as "virtual", which means that this function can be overridden.
    Any base class should use this or @abstractmethod to decorate all functions
    that can be (re)implemented by subclasses.
"""
return func
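# Illustrative use of the decorator above (class and method names are
# hypothetical, not taken from this module):
#
#   class BaseGateway:
#       @virtual
#       def on_tick(self, tick: TickData) -> None:
#           """Hook that concrete subclasses may override."""
#           pass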
|
|
#! /usr/bin/env python
"""Test the arraymodule.
Roger E. Masse
"""
import unittest
from test import test_support
from weakref import proxy
import array, cStringIO
from cPickle import loads, dumps
class ArraySubclass(array.array):
pass
class ArraySubclassWithKwargs(array.array):
def __init__(self, typecode, newarg=None):
array.array.__init__(typecode)
tests = [] # list to accumulate all tests
typecodes = "cubBhHiIlLfd"
class BadConstructorTest(unittest.TestCase):
def test_constructor(self):
self.assertRaises(TypeError, array.array)
self.assertRaises(TypeError, array.array, spam=42)
self.assertRaises(TypeError, array.array, 'xx')
self.assertRaises(ValueError, array.array, 'x')
tests.append(BadConstructorTest)
class BaseTest(unittest.TestCase):
    # Required class attributes (provided by subclasses):
# typecode: the typecode to test
# example: an initializer usable in the constructor for this type
# smallerexample: the same length as example, but smaller
# biggerexample: the same length as example, but bigger
# outside: An entry that is not in example
# minitemsize: the minimum guaranteed itemsize
def assertEntryEqual(self, entry1, entry2):
self.assertEqual(entry1, entry2)
def badtypecode(self):
# Return a typecode that is different from our own
return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
def test_constructor(self):
a = array.array(self.typecode)
self.assertEqual(a.typecode, self.typecode)
self.assert_(a.itemsize>=self.minitemsize)
self.assertRaises(TypeError, array.array, self.typecode, None)
def test_len(self):
a = array.array(self.typecode)
a.append(self.example[0])
self.assertEqual(len(a), 1)
a = array.array(self.typecode, self.example)
self.assertEqual(len(a), len(self.example))
def test_buffer_info(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assert_(isinstance(bi, tuple))
self.assertEqual(len(bi), 2)
self.assert_(isinstance(bi[0], (int, long)))
self.assert_(isinstance(bi[1], int))
self.assertEqual(bi[1], len(a))
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
b.byteswap()
self.assertEqual(a, b)
def test_copy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.copy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_deepcopy(self):
import copy
a = array.array(self.typecode, self.example)
b = copy.deepcopy(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
def test_pickle(self):
for protocol in (0, 1, 2):
a = array.array(self.typecode, self.example)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode, self.example)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_pickle_for_empty_array(self):
for protocol in (0, 1, 2):
a = array.array(self.typecode)
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
a = ArraySubclass(self.typecode)
a.x = 10
b = loads(dumps(a, protocol))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(type(a), type(b))
def test_insert(self):
a = array.array(self.typecode, self.example)
a.insert(0, self.example[0])
self.assertEqual(len(a), 1+len(self.example))
self.assertEqual(a[0], a[1])
self.assertRaises(TypeError, a.insert)
self.assertRaises(TypeError, a.insert, None)
self.assertRaises(TypeError, a.insert, 0, None)
a = array.array(self.typecode, self.example)
a.insert(-1, self.example[0])
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:-1] + self.example[:1] + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a.insert(-1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a.insert(1000, self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[:1])
)
def test_tofromfile(self):
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.tofile)
self.assertRaises(TypeError, a.tofile, cStringIO.StringIO())
test_support.unlink(test_support.TESTFN)
f = open(test_support.TESTFN, 'wb')
try:
a.tofile(f)
f.close()
b = array.array(self.typecode)
f = open(test_support.TESTFN, 'rb')
self.assertRaises(TypeError, b.fromfile)
self.assertRaises(
TypeError,
b.fromfile,
cStringIO.StringIO(), len(self.example)
)
b.fromfile(f, len(self.example))
self.assertEqual(b, array.array(self.typecode, self.example))
self.assertNotEqual(a, b)
b.fromfile(f, len(self.example))
self.assertEqual(a, b)
self.assertRaises(EOFError, b.fromfile, f, 1)
f.close()
finally:
if not f.closed:
f.close()
test_support.unlink(test_support.TESTFN)
def test_tofromlist(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tolist, 42)
self.assertRaises(TypeError, b.fromlist)
self.assertRaises(TypeError, b.fromlist, 42)
self.assertRaises(TypeError, b.fromlist, [None])
b.fromlist(a.tolist())
self.assertEqual(a, b)
def test_tofromstring(self):
a = array.array(self.typecode, 2*self.example)
b = array.array(self.typecode)
self.assertRaises(TypeError, a.tostring, 42)
self.assertRaises(TypeError, b.fromstring)
self.assertRaises(TypeError, b.fromstring, 42)
b.fromstring(a.tostring())
self.assertEqual(a, b)
if a.itemsize>1:
self.assertRaises(ValueError, b.fromstring, "x")
def test_repr(self):
a = array.array(self.typecode, 2*self.example)
self.assertEqual(a, eval(repr(a), {"array": array.array}))
a = array.array(self.typecode)
self.assertEqual(repr(a), "array('%s')" % self.typecode)
def test_str(self):
a = array.array(self.typecode, 2*self.example)
str(a)
def test_cmp(self):
a = array.array(self.typecode, self.example)
self.assert_((a == 42) is False)
self.assert_((a != 42) is True)
self.assert_((a == a) is True)
self.assert_((a != a) is False)
self.assert_((a < a) is False)
self.assert_((a <= a) is True)
self.assert_((a > a) is False)
self.assert_((a >= a) is True)
al = array.array(self.typecode, self.smallerexample)
ab = array.array(self.typecode, self.biggerexample)
self.assert_((a == 2*a) is False)
self.assert_((a != 2*a) is True)
self.assert_((a < 2*a) is True)
self.assert_((a <= 2*a) is True)
self.assert_((a > 2*a) is False)
self.assert_((a >= 2*a) is False)
self.assert_((a == al) is False)
self.assert_((a != al) is True)
self.assert_((a < al) is False)
self.assert_((a <= al) is False)
self.assert_((a > al) is True)
self.assert_((a >= al) is True)
self.assert_((a == ab) is False)
self.assert_((a != ab) is True)
self.assert_((a < ab) is True)
self.assert_((a <= ab) is True)
self.assert_((a > ab) is False)
self.assert_((a >= ab) is False)
def test_add(self):
a = array.array(self.typecode, self.example) \
+ array.array(self.typecode, self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[::-1])
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__add__, "bad")
def test_iadd(self):
a = array.array(self.typecode, self.example[::-1])
b = a
a += array.array(self.typecode, 2*self.example)
self.assert_(a is b)
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1]+2*self.example)
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__add__, b)
self.assertRaises(TypeError, a.__iadd__, "bad")
def test_mul(self):
a = 5*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a = array.array(self.typecode, self.example)*5
self.assertEqual(
a,
array.array(self.typecode, self.example*5)
)
a = 0*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
a = (-1)*array.array(self.typecode, self.example)
self.assertEqual(
a,
array.array(self.typecode)
)
self.assertRaises(TypeError, a.__mul__, "bad")
def test_imul(self):
a = array.array(self.typecode, self.example)
b = a
a *= 5
self.assert_(a is b)
self.assertEqual(
a,
array.array(self.typecode, 5*self.example)
)
a *= 0
self.assert_(a is b)
self.assertEqual(a, array.array(self.typecode))
a *= 1000
self.assert_(a is b)
self.assertEqual(a, array.array(self.typecode))
a *= -1
self.assert_(a is b)
self.assertEqual(a, array.array(self.typecode))
a = array.array(self.typecode, self.example)
a *= -1
self.assertEqual(a, array.array(self.typecode))
self.assertRaises(TypeError, a.__imul__, "bad")
def test_getitem(self):
a = array.array(self.typecode, self.example)
self.assertEntryEqual(a[0], self.example[0])
self.assertEntryEqual(a[0L], self.example[0])
self.assertEntryEqual(a[-1], self.example[-1])
self.assertEntryEqual(a[-1L], self.example[-1])
self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
self.assertEntryEqual(a[-len(self.example)], self.example[0])
self.assertRaises(TypeError, a.__getitem__)
self.assertRaises(IndexError, a.__getitem__, len(self.example))
self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
def test_setitem(self):
a = array.array(self.typecode, self.example)
a[0] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[0L] = a[-1]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-1L] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[len(self.example)-1] = a[0]
self.assertEntryEqual(a[0], a[-1])
a = array.array(self.typecode, self.example)
a[-len(self.example)] = a[-1]
self.assertEntryEqual(a[0], a[-1])
self.assertRaises(TypeError, a.__setitem__)
self.assertRaises(TypeError, a.__setitem__, None)
self.assertRaises(TypeError, a.__setitem__, 0, None)
self.assertRaises(
IndexError,
a.__setitem__,
len(self.example), self.example[0]
)
self.assertRaises(
IndexError,
a.__setitem__,
-len(self.example)-1, self.example[0]
)
def test_delitem(self):
a = array.array(self.typecode, self.example)
del a[0]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
a = array.array(self.typecode, self.example)
del a[-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[len(self.example)-1]
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1])
)
a = array.array(self.typecode, self.example)
del a[-len(self.example)]
self.assertEqual(
a,
array.array(self.typecode, self.example[1:])
)
self.assertRaises(TypeError, a.__delitem__)
self.assertRaises(TypeError, a.__delitem__, None)
self.assertRaises(IndexError, a.__delitem__, len(self.example))
self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
def test_getslice(self):
a = array.array(self.typecode, self.example)
self.assertEqual(a[:], a)
self.assertEqual(
a[1:],
array.array(self.typecode, self.example[1:])
)
self.assertEqual(
a[:1],
array.array(self.typecode, self.example[:1])
)
self.assertEqual(
a[:-1],
array.array(self.typecode, self.example[:-1])
)
self.assertEqual(
a[-1:],
array.array(self.typecode, self.example[-1:])
)
self.assertEqual(
a[-1:-1],
array.array(self.typecode)
)
self.assertEqual(
a[2:1],
array.array(self.typecode)
)
self.assertEqual(
a[1000:],
array.array(self.typecode)
)
self.assertEqual(a[-1000:], a)
self.assertEqual(a[:1000], a)
self.assertEqual(
a[:-1000],
array.array(self.typecode)
)
self.assertEqual(a[-1000:1000], a)
self.assertEqual(
a[2000:1000],
array.array(self.typecode)
)
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing
# (Assumes list conversion works correctly, too)
a = array.array(self.typecode, self.example)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
self.assertEqual(list(a[start:stop:step]),
list(a)[start:stop:step])
def test_setslice(self):
a = array.array(self.typecode, self.example)
a[:1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[:-1] = a
self.assertEqual(
a,
array.array(self.typecode, self.example + self.example[-1:])
)
a = array.array(self.typecode, self.example)
a[-1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:-1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example)
)
a = array.array(self.typecode, self.example)
a[1:-1] = a
self.assertEqual(
a,
array.array(
self.typecode,
self.example[:1] + self.example + self.example[-1:]
)
)
a = array.array(self.typecode, self.example)
a[1000:] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[-1000:] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:1000] = a
self.assertEqual(
a,
array.array(self.typecode, self.example)
)
a = array.array(self.typecode, self.example)
a[:-1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
a[1:0] = a
self.assertEqual(
a,
array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
)
a = array.array(self.typecode, self.example)
a[2000:1000] = a
self.assertEqual(
a,
array.array(self.typecode, 2*self.example)
)
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Everything except the initial 0 (invalid step)
for step in indices[1:]:
a = array.array(self.typecode, self.example)
L = list(a)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
a[start:stop:step] = array.array(self.typecode, data)
self.assertEquals(a, array.array(self.typecode, L))
del L[start:stop:step]
del a[start:stop:step]
self.assertEquals(a, array.array(self.typecode, L))
def test_index(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.index)
for x in example:
self.assertEqual(a.index(x), example.index(x))
self.assertRaises(ValueError, a.index, None)
self.assertRaises(ValueError, a.index, self.outside)
def test_count(self):
example = 2*self.example
a = array.array(self.typecode, example)
self.assertRaises(TypeError, a.count)
for x in example:
self.assertEqual(a.count(x), example.count(x))
self.assertEqual(a.count(self.outside), 0)
self.assertEqual(a.count(None), 0)
def test_remove(self):
for x in self.example:
example = 2*self.example
a = array.array(self.typecode, example)
pos = example.index(x)
example2 = example[:pos] + example[pos+1:]
a.remove(x)
self.assertEqual(a, array.array(self.typecode, example2))
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.remove, self.outside)
self.assertRaises(ValueError, a.remove, None)
def test_pop(self):
a = array.array(self.typecode)
self.assertRaises(IndexError, a.pop)
a = array.array(self.typecode, 2*self.example)
self.assertRaises(TypeError, a.pop, 42, 42)
self.assertRaises(TypeError, a.pop, None)
self.assertRaises(IndexError, a.pop, len(a))
self.assertRaises(IndexError, a.pop, -len(a)-1)
self.assertEntryEqual(a.pop(0), self.example[0])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:]+self.example)
)
self.assertEntryEqual(a.pop(1), self.example[2])
self.assertEqual(
a,
array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
)
self.assertEntryEqual(a.pop(0), self.example[1])
self.assertEntryEqual(a.pop(), self.example[-1])
self.assertEqual(
a,
array.array(self.typecode, self.example[3:]+self.example[:-1])
)
def test_reverse(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.reverse, 42)
a.reverse()
self.assertEqual(
a,
array.array(self.typecode, self.example[::-1])
)
def test_extend(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.extend)
a.extend(array.array(self.typecode, self.example[::-1]))
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
b = array.array(self.badtypecode())
self.assertRaises(TypeError, a.extend, b)
a = array.array(self.typecode, self.example)
a.extend(self.example[::-1])
self.assertEqual(
a,
array.array(self.typecode, self.example+self.example[::-1])
)
def test_constructor_with_iterable_argument(self):
a = array.array(self.typecode, iter(self.example))
b = array.array(self.typecode, self.example)
self.assertEqual(a, b)
# non-iterable argument
self.assertRaises(TypeError, array.array, self.typecode, 10)
# pass through errors raised in __iter__
class A:
def __iter__(self):
raise UnicodeError
self.assertRaises(UnicodeError, array.array, self.typecode, A())
# pass through errors raised in next()
def B():
raise UnicodeError
yield None
self.assertRaises(UnicodeError, array.array, self.typecode, B())
def test_coveritertraverse(self):
try:
import gc
except ImportError:
return
a = array.array(self.typecode)
l = [iter(a)]
l.append(l)
gc.collect()
def test_buffer(self):
a = array.array(self.typecode, self.example)
b = buffer(a)
self.assertEqual(b[0], a.tostring()[0])
def test_weakref(self):
s = array.array(self.typecode, self.example)
p = proxy(s)
self.assertEqual(p.tostring(), s.tostring())
s = None
self.assertRaises(ReferenceError, len, p)
def test_bug_782369(self):
import sys
if hasattr(sys, "getrefcount"):
for i in range(10):
b = array.array('B', range(64))
rc = sys.getrefcount(10)
for i in range(10):
b = array.array('B', range(64))
self.assertEqual(rc, sys.getrefcount(10))
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
ArraySubclassWithKwargs('b', newarg=1)
class StringTest(BaseTest):
def test_setitem(self):
super(StringTest, self).test_setitem()
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.__setitem__, 0, self.example[:2])
class CharacterTest(StringTest):
typecode = 'c'
example = '\x01azAZ\x00\xfe'
smallerexample = '\x01azAY\x00\xfe'
biggerexample = '\x01azAZ\x00\xff'
outside = '\x33'
minitemsize = 1
    def test_subclassing(self):
class EditableString(array.array):
def __new__(cls, s, *args, **kwargs):
return array.array.__new__(cls, 'c', s)
def __init__(self, s, color='blue'):
self.color = color
def strip(self):
self[:] = array.array('c', self.tostring().strip())
def __repr__(self):
return 'EditableString(%r)' % self.tostring()
s = EditableString("\ttest\r\n")
s.strip()
self.assertEqual(s.tostring(), "test")
self.assertEqual(s.color, "blue")
s.color = "red"
self.assertEqual(s.color, "red")
self.assertEqual(s.__dict__.keys(), ["color"])
def test_nounicode(self):
a = array.array(self.typecode, self.example)
self.assertRaises(ValueError, a.fromunicode, unicode(''))
self.assertRaises(ValueError, a.tounicode)
tests.append(CharacterTest)
if test_support.have_unicode:
class UnicodeTest(StringTest):
typecode = 'u'
example = unicode(r'\x01\u263a\x00\ufeff', 'unicode-escape')
smallerexample = unicode(r'\x01\u263a\x00\ufefe', 'unicode-escape')
biggerexample = unicode(r'\x01\u263a\x01\ufeff', 'unicode-escape')
outside = unicode('\x33')
minitemsize = 2
def test_unicode(self):
self.assertRaises(TypeError, array.array, 'b', unicode('foo', 'ascii'))
a = array.array('u', unicode(r'\xa0\xc2\u1234', 'unicode-escape'))
a.fromunicode(unicode(' ', 'ascii'))
a.fromunicode(unicode('', 'ascii'))
a.fromunicode(unicode('', 'ascii'))
a.fromunicode(unicode(r'\x11abc\xff\u1234', 'unicode-escape'))
s = a.tounicode()
self.assertEqual(
s,
unicode(r'\xa0\xc2\u1234 \x11abc\xff\u1234', 'unicode-escape')
)
s = unicode(r'\x00="\'a\\b\x80\xff\u0000\u0001\u1234', 'unicode-escape')
a = array.array('u', s)
self.assertEqual(
repr(a),
r"""array('u', u'\x00="\'a\\b\x80\xff\x00\x01\u1234')"""
)
self.assertRaises(TypeError, a.fromunicode)
tests.append(UnicodeTest)
class NumberTest(BaseTest):
def test_extslice(self):
a = array.array(self.typecode, range(5))
self.assertEqual(a[::], a)
self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], array.array(self.typecode, [0,2,4]))
self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
def test_delslice(self):
a = array.array(self.typecode, range(5))
del a[::2]
self.assertEqual(a, array.array(self.typecode, [1,3]))
a = array.array(self.typecode, range(5))
del a[1::2]
self.assertEqual(a, array.array(self.typecode, [0,2,4]))
a = array.array(self.typecode, range(5))
del a[1::-2]
self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
a = array.array(self.typecode, range(10))
del a[::1000]
self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
def test_assignment(self):
a = array.array(self.typecode, range(10))
a[::2] = array.array(self.typecode, [42]*5)
self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
a = array.array(self.typecode, range(10))
a[::-4] = array.array(self.typecode, [10]*3)
        self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8, 10]))
a = array.array(self.typecode, range(4))
a[::-1] = a
self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
a = array.array(self.typecode, range(10))
b = a[:]
c = a[:]
ins = array.array(self.typecode, range(2))
a[2:3] = ins
b[slice(2,3)] = ins
c[2:3:] = ins
def test_iterationcontains(self):
a = array.array(self.typecode, range(10))
self.assertEqual(list(a), range(10))
b = array.array(self.typecode, [20])
self.assertEqual(a[-1] in a, True)
self.assertEqual(b[0] not in a, True)
def check_overflow(self, lower, upper):
# method to be used by subclasses
# should not overflow assigning lower limit
a = array.array(self.typecode, [lower])
a[0] = lower
# should overflow assigning less than lower limit
self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
# should not overflow assigning upper limit
a = array.array(self.typecode, [upper])
a[0] = upper
# should overflow assigning more than upper limit
self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
def test_subclassing(self):
typecode = self.typecode
class ExaggeratingArray(array.array):
__slots__ = ['offset']
def __new__(cls, typecode, data, offset):
return array.array.__new__(cls, typecode, data)
def __init__(self, typecode, data, offset):
self.offset = offset
def __getitem__(self, i):
return array.array.__getitem__(self, i) + self.offset
a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
self.assertEntryEqual(a[0], 7)
self.assertRaises(AttributeError, setattr, a, "color", "blue")
class SignedNumberTest(NumberTest):
example = [-1, 0, 1, 42, 0x7f]
smallerexample = [-1, 0, 1, 42, 0x7e]
biggerexample = [-1, 0, 1, 43, 0x7f]
outside = 23
def test_overflow(self):
a = array.array(self.typecode)
lower = -1 * long(pow(2, a.itemsize * 8 - 1))
upper = long(pow(2, a.itemsize * 8 - 1)) - 1L
self.check_overflow(lower, upper)
class UnsignedNumberTest(NumberTest):
example = [0, 1, 17, 23, 42, 0xff]
smallerexample = [0, 1, 17, 23, 42, 0xfe]
biggerexample = [0, 1, 17, 23, 43, 0xff]
outside = 0xaa
def test_overflow(self):
a = array.array(self.typecode)
lower = 0
upper = long(pow(2, a.itemsize * 8)) - 1L
self.check_overflow(lower, upper)
class ByteTest(SignedNumberTest):
typecode = 'b'
minitemsize = 1
tests.append(ByteTest)
class UnsignedByteTest(UnsignedNumberTest):
typecode = 'B'
minitemsize = 1
tests.append(UnsignedByteTest)
class ShortTest(SignedNumberTest):
typecode = 'h'
minitemsize = 2
tests.append(ShortTest)
class UnsignedShortTest(UnsignedNumberTest):
typecode = 'H'
minitemsize = 2
tests.append(UnsignedShortTest)
class IntTest(SignedNumberTest):
typecode = 'i'
minitemsize = 2
tests.append(IntTest)
class UnsignedIntTest(UnsignedNumberTest):
typecode = 'I'
minitemsize = 2
tests.append(UnsignedIntTest)
class LongTest(SignedNumberTest):
typecode = 'l'
minitemsize = 4
tests.append(LongTest)
class UnsignedLongTest(UnsignedNumberTest):
typecode = 'L'
minitemsize = 4
tests.append(UnsignedLongTest)
class FPTest(NumberTest):
example = [-42.0, 0, 42, 1e5, -1e10]
smallerexample = [-42.0, 0, 42, 1e5, -2e10]
biggerexample = [-42.0, 0, 42, 1e5, 1e10]
outside = 23
def assertEntryEqual(self, entry1, entry2):
self.assertAlmostEqual(entry1, entry2)
def test_byteswap(self):
a = array.array(self.typecode, self.example)
self.assertRaises(TypeError, a.byteswap, 42)
if a.itemsize in (1, 2, 4, 8):
b = array.array(self.typecode, self.example)
b.byteswap()
if a.itemsize==1:
self.assertEqual(a, b)
else:
                # On alphas, treating the byte-swapped bit patterns as
# floats/doubles results in floating point exceptions
# => compare the 8bit string values instead
self.assertNotEqual(a.tostring(), b.tostring())
b.byteswap()
self.assertEqual(a, b)
class FloatTest(FPTest):
typecode = 'f'
minitemsize = 4
tests.append(FloatTest)
class DoubleTest(FPTest):
typecode = 'd'
minitemsize = 8
def test_alloc_overflow(self):
from sys import maxsize
a = array.array('d', [-1]*65536)
try:
a *= maxsize//65536 + 1
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
b = array.array('d', [ 2.71828183, 3.14159265, -1])
try:
b * (maxsize//3 + 1)
except MemoryError:
pass
else:
self.fail("Array of size > maxsize created - MemoryError expected")
tests.append(DoubleTest)
def test_main(verbose=None):
import sys
test_support.run_unittest(*tests)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*tests)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
|
|
from __future__ import unicode_literals
import collections
import datetime
import decimal
import functools
import math
import types
import uuid
from importlib import import_module
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import datetime_safe, six
from django.utils.encoding import force_text
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
try:
import enum
except ImportError:
# No support on Python 2 if enum34 isn't installed.
enum = None
class BaseSerializer(object):
def __init__(self, value):
self.value = value
def serialize(self):
raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
def _format(self):
raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
value = self._format()
return value % (", ".join(strings)), imports
class BaseSimpleSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), set()
class ByteTypeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if six.PY2:
# Prepend the `b` prefix since we're importing unicode_literals
value_repr = 'b' + value_repr
return value_repr, set()
class DatetimeSerializer(BaseSerializer):
def serialize(self):
if self.value.tzinfo is not None and self.value.tzinfo != utc:
self.value = self.value.astimezone(utc)
value_repr = repr(self.value).replace("<UTC>", "utc")
if isinstance(self.value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
imports = ["import datetime"]
if self.value.tzinfo is not None:
imports.append("from django.utils.timezone import utc")
return value_repr, set(imports)
class DateSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if isinstance(self.value, datetime_safe.date):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
class DecimalSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
@staticmethod
def serialize_deconstructed(path, args, kwargs):
name, imports = DeconstructableSerializer._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = serializer_factory(arg).serialize()
strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in sorted(kwargs.items()):
arg_string, arg_imports = serializer_factory(arg).serialize()
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(strings)), imports
@staticmethod
def _serialize_path(path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
def serialize(self):
return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for k, v in sorted(self.value.items()):
k_string, k_imports = serializer_factory(k).serialize()
v_string, v_imports = serializer_factory(v).serialize()
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
class EnumSerializer(BaseSerializer):
def serialize(self):
enum_class = self.value.__class__
module = enum_class.__module__
imports = {"import %s" % module}
v_string, v_imports = serializer_factory(self.value.value).serialize()
imports.update(v_imports)
return "%s.%s(%s)" % (module, enum_class.__name__, v_string), imports
class FloatSerializer(BaseSimpleSerializer):
def serialize(self):
if math.isnan(self.value) or math.isinf(self.value):
return 'float("{}")'.format(self.value), set()
return super(FloatSerializer, self).serialize()
class FrozensetSerializer(BaseSequenceSerializer):
def _format(self):
return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
def serialize(self):
if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type):
klass = self.value.__self__
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module}
# Further error checking
if self.value.__name__ == '<lambda>':
raise ValueError("Cannot serialize function: lambda")
if self.value.__module__ is None:
raise ValueError("Cannot serialize function %r: No module" % self.value)
# Python 3 is a lot easier, and only uses this branch if it's not local.
if getattr(self.value, "__qualname__", None) and getattr(self.value, "__module__", None):
if "<" not in self.value.__qualname__: # Qualname can include <locals>
return "%s.%s" % \
(self.value.__module__, self.value.__qualname__), {"import %s" % self.value.__module__}
# Python 2/fallback version
module_name = self.value.__module__
# Make sure it's actually there and not an unbound method
module = import_module(module_name)
if not hasattr(module, self.value.__name__):
raise ValueError(
"Could not find function %s in %s.\n"
"Please note that due to Python 2 limitations, you cannot "
"serialize unbound method functions (e.g. a method "
"declared and used in the same class body). Please move "
"the function into the main module body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (self.value.__name__, module_name, get_docs_version())
)
# Needed on Python 2 only
if module_name == '__builtin__':
return self.value.__name__, set()
return "%s.%s" % (module_name, self.value.__name__), {"import %s" % module_name}
class FunctoolsPartialSerializer(BaseSerializer):
def serialize(self):
imports = {'import functools'}
# Serialize functools.partial() arguments
func_string, func_imports = serializer_factory(self.value.func).serialize()
args_string, args_imports = serializer_factory(self.value.args).serialize()
keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
# Add any imports needed by arguments
imports.update(func_imports)
imports.update(args_imports)
imports.update(keywords_imports)
return (
"functools.partial(%s, *%s, **%s)" % (
func_string, args_string, keywords_string,
),
imports,
)
class IterableSerializer(BaseSerializer):
def serialize(self):
imports = set()
strings = []
for item in self.value:
item_string, item_imports = serializer_factory(item).serialize()
imports.update(item_imports)
strings.append(item_string)
# When len(strings)==0, the empty iterable should be serialized as
# "()", not "(,)" because (,) is invalid Python syntax.
value = "(%s)" if len(strings) != 1 else "(%s,)"
return value % (", ".join(strings)), imports
class ModelFieldSerializer(DeconstructableSerializer):
def serialize(self):
attr_name, path, args, kwargs = self.value.deconstruct()
return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
def serialize(self):
as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
if as_manager:
name, imports = self._serialize_path(qs_path)
return "%s.as_manager()" % name, imports
else:
return self.serialize_deconstructed(manager_path, args, kwargs)
class OperationSerializer(BaseSerializer):
def serialize(self):
from django.db.migrations.writer import OperationWriter
string, imports = OperationWriter(self.value, indentation=0).serialize()
        # Nested operation; the trailing comma is handled by the enclosing OperationWriter._write()
return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
def serialize(self):
imports = {"import re"}
regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize()
regex_flags, flag_imports = serializer_factory(self.value.flags).serialize()
imports.update(pattern_imports)
imports.update(flag_imports)
args = [regex_pattern]
if self.value.flags:
args.append(regex_flags)
return "re.compile(%s)" % ', '.join(args), imports
class SequenceSerializer(BaseSequenceSerializer):
def _format(self):
return "[%s]"
class SetSerializer(BaseSequenceSerializer):
def _format(self):
# Don't use the literal "{%s}" as it doesn't support empty set
return "set([%s])"
class SettingsReferenceSerializer(BaseSerializer):
def serialize(self):
return "settings.%s" % self.value.setting_name, {"from django.conf import settings"}
class TextTypeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if six.PY2:
# Strip the `u` prefix since we're importing unicode_literals
value_repr = value_repr[1:]
return value_repr, set()
class TimedeltaSerializer(BaseSerializer):
def serialize(self):
return repr(self.value), {"import datetime"}
class TimeSerializer(BaseSerializer):
def serialize(self):
value_repr = repr(self.value)
if isinstance(self.value, datetime_safe.time):
value_repr = "datetime.%s" % value_repr
return value_repr, {"import datetime"}
class TupleSerializer(BaseSequenceSerializer):
def _format(self):
# When len(value)==0, the empty tuple should be serialized as "()",
# not "(,)" because (,) is invalid Python syntax.
return "(%s)" if len(self.value) != 1 else "(%s,)"
class TypeSerializer(BaseSerializer):
def serialize(self):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is self.value:
return string, set(imports)
if hasattr(self.value, "__module__"):
module = self.value.__module__
if module == six.moves.builtins.__name__:
return self.value.__name__, set()
else:
return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
class UUIDSerializer(BaseSerializer):
def serialize(self):
return "uuid.%s" % repr(self.value), {"import uuid"}
def serializer_factory(value):
from django.db.migrations.writer import SettingsReference
if isinstance(value, Promise):
value = force_text(value)
elif isinstance(value, LazyObject):
# The unwrapped value is returned as the first item of the arguments
# tuple.
value = value.__reduce__()[1][0]
if isinstance(value, models.Field):
return ModelFieldSerializer(value)
if isinstance(value, models.manager.BaseManager):
return ModelManagerSerializer(value)
if isinstance(value, Operation):
return OperationSerializer(value)
if isinstance(value, type):
return TypeSerializer(value)
# Anything that knows how to deconstruct itself.
if hasattr(value, 'deconstruct'):
return DeconstructableSerializer(value)
# Unfortunately some of these are order-dependent.
if isinstance(value, frozenset):
return FrozensetSerializer(value)
if isinstance(value, list):
return SequenceSerializer(value)
if isinstance(value, set):
return SetSerializer(value)
if isinstance(value, tuple):
return TupleSerializer(value)
if isinstance(value, dict):
return DictionarySerializer(value)
if enum and isinstance(value, enum.Enum):
return EnumSerializer(value)
if isinstance(value, datetime.datetime):
return DatetimeSerializer(value)
if isinstance(value, datetime.date):
return DateSerializer(value)
if isinstance(value, datetime.time):
return TimeSerializer(value)
if isinstance(value, datetime.timedelta):
return TimedeltaSerializer(value)
if isinstance(value, SettingsReference):
return SettingsReferenceSerializer(value)
if isinstance(value, float):
return FloatSerializer(value)
if isinstance(value, six.integer_types + (bool, type(None))):
return BaseSimpleSerializer(value)
if isinstance(value, six.binary_type):
return ByteTypeSerializer(value)
if isinstance(value, six.text_type):
return TextTypeSerializer(value)
if isinstance(value, decimal.Decimal):
return DecimalSerializer(value)
if isinstance(value, functools.partial):
return FunctoolsPartialSerializer(value)
if isinstance(value, (types.FunctionType, types.BuiltinFunctionType, types.MethodType)):
return FunctionTypeSerializer(value)
if isinstance(value, collections.Iterable):
return IterableSerializer(value)
if isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)):
return RegexSerializer(value)
if isinstance(value, uuid.UUID):
return UUIDSerializer(value)
raise ValueError(
"Cannot serialize: %r\nThere are some values Django cannot serialize into "
"migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
"topics/migrations/#migration-serializing" % (value, get_docs_version())
)
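# --- Editor's illustrative sketch (not part of Django) ----------------------
# serializer_factory() is the single entry point used by the migration writer:
# every supported value becomes a (string, imports) pair that can be embedded
# verbatim in a generated migration. The helper below only wraps that call for
# demonstration; its name and the example values are hypothetical.
def _example_serialize(value):
    """Demo helper: return the (string, imports) pair for ``value``."""
    return serializer_factory(value).serialize()
# _example_serialize(datetime.date(2020, 1, 1))
#     -> ("datetime.date(2020, 1, 1)", {"import datetime"})
# _example_serialize({"answer": 42})
#     -> ("{'answer': 42}", set())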
|
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import hashlib
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.http import same_origin
from django.utils.log import getLogger
from django.utils.crypto import constant_time_compare, get_random_string
logger = getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
    Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
    A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
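# Editor's illustrative sketch (not part of Django): a view that calls
# get_token() explicitly. This forces CSRF_COOKIE_USED so that
# CsrfViewMiddleware.process_response() sets the CSRF cookie even when no
# template renders the csrf_token tag. The view name and response text are
# hypothetical.
def _example_view_forcing_csrf_cookie(request):
    from django.http import HttpResponse
    token = get_token(request) or ''
    return HttpResponse('CSRF cookie will be set; token length: %d' % len(token))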
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake
# of the post processing middleware.
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's
# available to the view.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
        # Assume that anything not defined as 'safe' by RFC 2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
logger.warning('Forbidden (%s): %s',
REASON_NO_REFERER, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_REFERER)
# Note that request.get_host() includes the port.
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
logger.warning('Forbidden (%s): %s',
REASON_NO_CSRF_COOKIE, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
logger.warning('Forbidden (%s): %s',
REASON_BAD_TOKEN, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
# If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
# (for example, contrib.auth redirecting to a login page).
if request.META.get("CSRF_COOKIE") is None:
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age = 60 * 60 * 24 * 7 * 52,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
|
|
from __future__ import absolute_import
import re
import six
import logging
from collections import namedtuple
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from symbolic import parse_addr, arch_from_breakpad, arch_from_macho, arch_is_known, ProcessState, id_from_breakpad
from sentry.interfaces.contexts import DeviceContextType
logger = logging.getLogger(__name__)
KNOWN_DSYM_TYPES = {
'iOS': 'macho',
'tvOS': 'macho',
'macOS': 'macho',
'watchOS': 'macho',
}
# Regular expression to parse OS versions from a minidump OS string
VERSION_RE = re.compile(r'(\d+\.\d+\.\d+)\s+(.*)')
# Regular expression to guess whether we're dealing with Windows or Unix paths
WINDOWS_PATH_RE = re.compile(r'^[a-z]:\\', re.IGNORECASE)
# Mapping of well-known minidump OS constants to our internal names
MINIDUMP_OS_TYPES = {
'Mac OS X': 'macOS',
'Windows NT': 'Windows',
}
AppInfo = namedtuple('AppInfo', ['id', 'version', 'build', 'name'])
def image_name(pkg):
split = '\\' if WINDOWS_PATH_RE.match(pkg) else '/'
return pkg.rsplit(split, 1)[-1]
def find_all_stacktraces(data):
"""Given a data dictionary from an event this returns all
relevant stacktraces in a list. If a frame contains a raw_stacktrace
property it's preferred over the processed one.
"""
rv = []
def _probe_for_stacktrace(container):
raw = container.get('raw_stacktrace')
if raw is not None:
rv.append((raw, container))
else:
processed = container.get('stacktrace')
if processed is not None:
rv.append((processed, container))
exc_container = data.get('sentry.interfaces.Exception')
if exc_container:
for exc in exc_container['values']:
_probe_for_stacktrace(exc)
# The legacy stacktrace interface does not support raw stacktraces
stacktrace = data.get('sentry.interfaces.Stacktrace')
if stacktrace:
rv.append((stacktrace, None))
threads = data.get('threads')
if threads:
for thread in threads['values']:
_probe_for_stacktrace(thread)
return rv
def get_sdk_from_event(event):
sdk_info = (event.get('debug_meta') or {}).get('sdk_info')
if sdk_info:
return sdk_info
os = (event.get('contexts') or {}).get('os')
if os and os.get('type') == 'os':
return get_sdk_from_os(os)
def get_sdk_from_os(data):
if 'name' not in data or 'version' not in data:
return
try:
version = six.text_type(data['version']).split('-', 1)[0] + '.0' * 3
system_version = tuple(int(x) for x in version.split('.')[:3])
except ValueError:
return
return {
'sdk_name': data['name'],
'version_major': system_version[0],
'version_minor': system_version[1],
'version_patchlevel': system_version[2],
'build': data.get('build'),
}
def cpu_name_from_data(data):
"""Returns the CPU name from the given data if it exists."""
device = DeviceContextType.primary_value_for_data(data)
if device:
arch = device.get('arch')
if isinstance(arch, six.string_types):
return arch
    # TODO: remove this fallback here; we do not want to support it going forward
unique_cpu_name = None
images = (data.get('debug_meta') or {}).get('images') or []
for img in images:
if img.get('arch') and arch_is_known(img['arch']):
cpu_name = img['arch']
elif img.get('cpu_type') is not None \
and img.get('cpu_subtype') is not None:
cpu_name = arch_from_macho(img['cpu_type'], img['cpu_subtype'])
else:
cpu_name = None
if unique_cpu_name is None:
unique_cpu_name = cpu_name
elif unique_cpu_name != cpu_name:
unique_cpu_name = None
break
return unique_cpu_name
def version_build_from_data(data):
"""Returns release and build string from the given data if it exists."""
app_context = data.get('contexts', {}).get('app', {})
if app_context is not None:
if (app_context.get('app_identifier', None) and
app_context.get('app_version', None) and
app_context.get('app_build', None) and
app_context.get('app_name', None)):
return AppInfo(
app_context.get('app_identifier', None),
app_context.get('app_version', None),
app_context.get('app_build', None),
app_context.get('app_name', None),
)
return None
def rebase_addr(instr_addr, obj):
return parse_addr(instr_addr) - parse_addr(obj.addr)
def sdk_info_to_sdk_id(sdk_info):
if sdk_info is None:
return None
rv = '%s_%d.%d.%d' % (
sdk_info['sdk_name'], sdk_info['version_major'], sdk_info['version_minor'],
sdk_info['version_patchlevel'],
)
build = sdk_info.get('build')
if build is not None:
rv = '%s_%s' % (rv, build)
return rv
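# Editor's note (illustrative, not part of the original module): a small demo
# of the id format produced by sdk_info_to_sdk_id(). The SDK values below are
# made up for demonstration only.
def _example_sdk_id():
    """Demo only: shows the '<name>_<major>.<minor>.<patch>[_<build>]' format."""
    return sdk_info_to_sdk_id({
        'sdk_name': 'iOS',
        'version_major': 9,
        'version_minor': 3,
        'version_patchlevel': 2,
        'build': '13F69',
    })  # -> 'iOS_9.3.2_13F69'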
def merge_minidump_event(data, minidump):
if isinstance(minidump, InMemoryUploadedFile):
minidump.open() # seek to start
state = ProcessState.from_minidump_buffer(minidump.read())
elif isinstance(minidump, TemporaryUploadedFile):
state = ProcessState.from_minidump(minidump.temporary_file_path())
else:
state = ProcessState.from_minidump(minidump)
data['platform'] = 'native'
data['level'] = 'fatal' if state.crashed else 'info'
data['message'] = 'Assertion Error: %s' % state.assertion if state.assertion \
else 'Fatal Error: %s' % state.crash_reason
if state.timestamp:
data['timestamp'] = float(state.timestamp)
# Extract as much context information as we can.
info = state.system_info
context = data.setdefault('contexts', {})
os = context.setdefault('os', {})
device = context.setdefault('device', {})
os['type'] = 'os' # Required by "get_sdk_from_event"
os['name'] = MINIDUMP_OS_TYPES.get(info.os_name, info.os_name)
os['version'] = info.os_version
os['build'] = info.os_build
device['arch'] = arch_from_breakpad(info.cpu_family)
# We can extract stack traces here already but since CFI is not
# available yet (without debug symbols), the stackwalker will
# resort to stack scanning which yields low-quality results. If
# the user provides us with debug symbols, we could reprocess this
# minidump and add improved stacktraces later.
data['threads'] = [{
'id': thread.thread_id,
'crashed': False,
'stacktrace': {
'frames': [{
'instruction_addr': '0x%x' % frame.return_address,
'function': '<unknown>', # Required by interface
'package': frame.module.name if frame.module else None,
} for frame in reversed(list(thread.frames()))],
},
} for thread in state.threads()]
# Mark the crashed thread and add its stacktrace to the exception
crashed_thread = data['threads'][state.requesting_thread]
crashed_thread['crashed'] = True
    # Extract the crash reason and related info
data['exception'] = {
'value': data['message'],
'thread_id': crashed_thread['id'],
'type': state.crash_reason,
# Move stacktrace here from crashed_thread (mutating!)
'stacktrace': crashed_thread.pop('stacktrace'),
'mechanism': {
'type': 'minidump',
'handled': False,
# We cannot extract exception codes or signals with the breakpad
# extractor just yet. Once these capabilities are added to symbolic,
# these values should go in the mechanism here.
}
}
# Extract referenced (not all loaded) images
images = [{
'type': 'symbolic',
'id': id_from_breakpad(module.id),
'image_addr': '0x%x' % module.addr,
'image_size': module.size,
'name': module.name,
} for module in state.modules()]
data.setdefault('debug_meta', {})['images'] = images
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from heat.common import template_format
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
tmpl1 = """
heat_template_version: 2014-10-16
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
"""
tmpl2 = """
heat_template_version: 2014-10-16
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
BResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
metadata:
Foo: {get_attr: [AResource, attr_A1]}
outputs:
out1:
value: {get_attr: [AResource, attr_A1]}
"""
tmpl3 = """
heat_template_version: 2014-10-16
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
BResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [AResource, attr_A2]}
Bar: {get_attr: [AResource, attr_A3]}
metadata:
first: {get_attr: [AResource, meta_A1]}
second: {get_attr: [AResource, meta_A2]}
third: {get_attr: [AResource, attr_A3]}
outputs:
out1:
value: {get_attr: [AResource, out_A1]}
out2:
value: {get_attr: [AResource, out_A2]}
"""
tmpl4 = """
heat_template_version: 2014-10-16
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
BResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
CResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
DResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [BResource, attr_B1]}
metadata:
Doo: {get_attr: [CResource, attr_C1]}
outputs:
out1:
value: [{get_attr: [AResource, attr_A1]},
{get_attr: [BResource, attr_B1]},
{get_attr: [CResource, attr_C1]}]
"""
tmpl5 = """
heat_template_version: 2014-10-16
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
BResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [AResource, attr_A2]}
metadata:
first: {get_attr: [AResource, meta_A1]}
CResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [BResource, attr_B2]}
metadata:
Doo: {get_attr: [BResource, attr_B1]}
first: {get_attr: [AResource, meta_A1]}
second: {get_attr: [BResource, meta_B2]}
outputs:
out1:
value: [{get_attr: [AResource, attr_A3]},
{get_attr: [AResource, attr_A4]},
{get_attr: [BResource, attr_B3]}]
"""
tmpl6 = """
heat_template_version: 2015-04-30
resources:
AResource:
type: ResourceWithComplexAttributesType
BResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, list, 1]}
Doo: {get_attr: [AResource, nested_dict, dict, b]}
outputs:
out1:
value: [{get_attr: [AResource, flat_dict, key2]},
{get_attr: [AResource, nested_dict, string]},
{get_attr: [BResource, attr_B3]}]
out2:
value: {get_resource: BResource}
"""
tmpl7 = """
heat_template_version: 2015-10-15
resources:
AResource:
type: ResourceWithPropsType
properties:
Foo: 'abc'
BResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [AResource, attr_A2]}
metadata:
first: {get_attr: [AResource, meta_A1]}
CResource:
type: ResourceWithPropsType
properties:
Foo: {get_attr: [AResource, attr_A1]}
Doo: {get_attr: [BResource, attr_B2]}
metadata:
Doo: {get_attr: [BResource, attr_B1]}
first: {get_attr: [AResource, meta_A1]}
second: {get_attr: [BResource, meta_B2]}
outputs:
out1:
value: [{get_attr: [AResource, attr_A3]},
{get_attr: [AResource, attr_A4]},
{get_attr: [BResource, attr_B3]},
{get_attr: [CResource]}]
"""
class DepAttrsTest(common.HeatTestCase):
scenarios = [
('no_attr',
dict(tmpl=tmpl1,
expected={'AResource': set()})),
('one_res_one_attr',
dict(tmpl=tmpl2,
expected={'AResource': {'attr_A1'},
'BResource': set()})),
('one_res_several_attrs',
dict(tmpl=tmpl3,
expected={'AResource': {'attr_A1', 'attr_A2', 'attr_A3',
'meta_A1', 'meta_A2'},
'BResource': set()})),
('several_res_one_attr',
dict(tmpl=tmpl4,
expected={'AResource': {'attr_A1'},
'BResource': {'attr_B1'},
'CResource': {'attr_C1'},
'DResource': set()})),
('several_res_several_attrs',
dict(tmpl=tmpl5,
expected={'AResource': {'attr_A1', 'attr_A2', 'meta_A1'},
'BResource': {'attr_B1', 'attr_B2', 'meta_B2'},
'CResource': set()})),
('nested_attr',
dict(tmpl=tmpl6,
expected={'AResource': set([(u'list', 1),
(u'nested_dict', u'dict', u'b')]),
'BResource': set([])})),
('several_res_several_attrs_and_all_attrs',
dict(tmpl=tmpl7,
expected={'AResource': {'attr_A1', 'attr_A2', 'meta_A1'},
'BResource': {'attr_B1', 'attr_B2', 'meta_B2'},
'CResource': set()}))
]
def setUp(self):
super(DepAttrsTest, self).setUp()
self.ctx = utils.dummy_context()
self.parsed_tmpl = template_format.parse(self.tmpl)
self.stack = stack.Stack(self.ctx, 'test_stack',
template.Template(self.parsed_tmpl))
def test_dep_attrs(self):
for res in self.stack.values():
definitions = (self.stack.defn.resource_definition(n)
for n in self.parsed_tmpl['resources'])
self.assertEqual(self.expected[res.name],
set(itertools.chain.from_iterable(
d.dep_attrs(res.name) for d in definitions)))
def test_all_dep_attrs(self):
for res in self.stack.values():
definitions = (self.stack.defn.resource_definition(n)
for n in self.parsed_tmpl['resources'])
attrs = set(itertools.chain.from_iterable(
d.dep_attrs(res.name, load_all=True) for d in definitions))
self.assertEqual(self.expected[res.name], attrs)
class ReferencedAttrsTest(common.HeatTestCase):
def setUp(self):
super(ReferencedAttrsTest, self).setUp()
parsed_tmpl = template_format.parse(tmpl6)
self.stack = stack.Stack(utils.dummy_context(), 'test_stack',
template.Template(parsed_tmpl))
self.resA = self.stack['AResource']
self.resB = self.stack['BResource']
def test_referenced_attrs_resources(self):
self.assertEqual(self.resA.referenced_attrs(in_resources=True,
in_outputs=False),
{('list', 1), ('nested_dict', 'dict', 'b')})
self.assertEqual(self.resB.referenced_attrs(in_resources=True,
in_outputs=False),
set())
def test_referenced_attrs_outputs(self):
self.assertEqual(self.resA.referenced_attrs(in_resources=False,
in_outputs=True),
{('flat_dict', 'key2'), ('nested_dict', 'string')})
self.assertEqual(self.resB.referenced_attrs(in_resources=False,
in_outputs=True),
{'attr_B3'})
def test_referenced_attrs_single_output(self):
self.assertEqual(self.resA.referenced_attrs(in_resources=False,
in_outputs={'out1'}),
{('flat_dict', 'key2'), ('nested_dict', 'string')})
self.assertEqual(self.resB.referenced_attrs(in_resources=False,
in_outputs={'out1'}),
{'attr_B3'})
self.assertEqual(self.resA.referenced_attrs(in_resources=False,
in_outputs={'out2'}),
set())
self.assertEqual(self.resB.referenced_attrs(in_resources=False,
in_outputs={'out2'}),
set())
def test_referenced_attrs_outputs_list(self):
self.assertEqual(self.resA.referenced_attrs(in_resources=False,
in_outputs={'out1',
'out2'}),
{('flat_dict', 'key2'), ('nested_dict', 'string')})
self.assertEqual(self.resB.referenced_attrs(in_resources=False,
in_outputs={'out1',
'out2'}),
{'attr_B3'})
def test_referenced_attrs_both(self):
self.assertEqual(self.resA.referenced_attrs(in_resources=True,
in_outputs=True),
{('list', 1), ('nested_dict', 'dict', 'b'),
('flat_dict', 'key2'), ('nested_dict', 'string')})
self.assertEqual(self.resB.referenced_attrs(in_resources=True,
in_outputs=True),
{'attr_B3'})
|
|
# pylint: disable=wrong-or-nonexistent-copyright-notice
import itertools
import math
from unittest import mock
import numpy as np
import pytest
import sympy
import cirq
import cirq.contrib.quimb as ccq
import cirq.experiments.google_v2_supremacy_circuit as supremacy_v2
import cirq.testing
from cirq import value
def assert_same_output_as_dense(circuit, qubit_order, initial_state=0, grouping=None):
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping=grouping)
ref_simulator = cirq.Simulator()
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
expected = ref_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_various_gates_1d():
gate_op_cls = [cirq.I, cirq.H, cirq.X, cirq.Y, cirq.Z, cirq.T]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1 = cirq.LineQubit.range(2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for cross_gate_op in cross_gate_op_cls:
circuit = cirq.Circuit(q0_gate_op(q0), q1_gate_op(q1), cross_gate_op(q0, q1))
for initial_state in range(2 * 2):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_various_gates_1d_flip():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
cirq.H(q1),
cirq.CNOT(q1, q0),
)
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1])
assert_same_output_as_dense(circuit=circuit, qubit_order=[q1, q0])
def test_various_gates_2d():
gate_op_cls = [cirq.I, cirq.H]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1, q2, q3, q4, q5 = cirq.GridQubit.rect(3, 2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for q2_gate_op in gate_op_cls:
for q3_gate_op in gate_op_cls:
for cross_gate_op1 in cross_gate_op_cls:
for cross_gate_op2 in cross_gate_op_cls:
circuit = cirq.Circuit(
q0_gate_op(q0),
q1_gate_op(q1),
cross_gate_op1(q0, q1),
q2_gate_op(q2),
q3_gate_op(q3),
cross_gate_op2(q3, q1),
)
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2, q3, q4, q5]
)
def test_grouping():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.X(q0) ** 0.1,
cirq.Y(q1) ** 0.2,
cirq.Z(q2) ** 0.3,
cirq.CNOT(q0, q1),
cirq.Y(q1) ** 0.4,
)
groupings = [
None,
{q0: 0, q1: 1, q2: 2},
{q0: 0, q1: 0, q2: 1},
{q0: 0, q1: 1, q2: 0},
{q0: 1, q1: 0, q2: 0},
{q0: 0, q1: 0, q2: 0},
]
for grouping in groupings:
for initial_state in range(2 * 2 * 2):
assert_same_output_as_dense(
circuit=circuit,
qubit_order=[q0, q1, q2],
initial_state=initial_state,
grouping=grouping,
)
def test_grouping_does_not_overlap():
q0, q1 = cirq.LineQubit.range(2)
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping={q0: 0})
with pytest.raises(ValueError, match="Grouping must cover exactly the qubits"):
mps_simulator.simulate(cirq.Circuit(), qubit_order={q0: 0, q1: 1})
def test_same_partial_trace():
qubit_order = cirq.LineQubit.range(2)
q0, q1 = qubit_order
mps_simulator = ccq.mps_simulator.MPSSimulator()
for _ in range(50):
for initial_state in range(4):
circuit = cirq.testing.random_circuit(qubit_order, 3, 0.9)
expected_density_matrix = cirq.final_density_matrix(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
expected_partial_trace = cirq.partial_trace(
expected_density_matrix.reshape(2, 2, 2, 2), keep_indices=[0]
)
final_state = mps_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
).final_state
actual_density_matrix = final_state.partial_trace([q0, q1])
actual_partial_trace = final_state.partial_trace([q0])
np.testing.assert_allclose(actual_density_matrix, expected_density_matrix, atol=1e-4)
np.testing.assert_allclose(actual_partial_trace, expected_partial_trace, atol=1e-4)
def test_probs_dont_sum_up_to_one():
q0 = cirq.NamedQid('q0', dimension=2)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator(
simulation_options=ccq.mps_simulator.MPSOptions(sum_prob_atol=-0.5)
)
with pytest.raises(ValueError, match="Sum of probabilities exceeds tolerance"):
simulator.run(circuit, repetitions=1)
def test_empty():
q0 = cirq.NamedQid('q0', dimension=2)
q1 = cirq.NamedQid('q1', dimension=3)
q2 = cirq.NamedQid('q2', dimension=5)
circuit = cirq.Circuit()
for initial_state in range(2 * 3 * 5):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2], initial_state=initial_state
)
def test_cnot():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q0, q1))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_cnot_flipped():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_act_on_args():
q0, q1 = qubit_order = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
mps_simulator = ccq.mps_simulator.MPSSimulator()
ref_simulator = cirq.Simulator()
for initial_state in range(4):
args = mps_simulator._create_act_on_args(initial_state=initial_state, qubits=(q0, q1))
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=args)
expected = ref_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_three_qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.CCX(q0, q1, q2))
with pytest.raises(ValueError, match="Can only handle 1 and 2 qubit operations"):
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1, q2])
def test_measurement_1qubit():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(q0), cirq.H(q1), cirq.measure(q1))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=100)
assert sum(result.measurements['1'])[0] < 80
assert sum(result.measurements['1'])[0] > 20
def test_reset():
q = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
c = cirq.Circuit(cirq.X(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['0'][0] == 0
c = cirq.Circuit(cirq.H(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['0'][0] == 0
c = cirq.Circuit(cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['0'][0] == 0
def test_measurement_2qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1), cirq.H(q2), cirq.measure(q0, q2))
simulator = ccq.mps_simulator.MPSSimulator()
repetitions = 1024
measurement = simulator.run(circuit, repetitions=repetitions).measurements['0,2']
result_counts = {'00': 0, '01': 0, '10': 0, '11': 0}
for i in range(repetitions):
key = str(measurement[i, 0]) + str(measurement[i, 1])
result_counts[key] += 1
for result_count in result_counts.values():
# Expected value is 1/4:
assert result_count > repetitions * 0.15
assert result_count < repetitions * 0.35
def test_measurement_str():
q0 = cirq.NamedQid('q0', dimension=3)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=7)
assert str(result) == "q0 (d=3)=0000000"
def test_trial_result_str():
q0 = cirq.LineQubit(0)
final_step_result = mock.Mock(cirq.StepResult)
final_step_result._simulator_state.return_value = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
assert (
str(
ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_step_result=final_step_result,
)
)
== """measurements: m=1
output state: TensorNetwork([
Tensor(shape=(2,), inds=('i_0',), tags=set()),
])"""
)
def test_trial_result_repr_pretty():
q0 = cirq.LineQubit(0)
final_step_result = mock.Mock(cirq.StepResult)
final_step_result._simulator_state.return_value = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
result = ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_step_result=final_step_result,
)
cirq.testing.assert_repr_pretty(
result,
"""measurements: m=1
output state: TensorNetwork([
Tensor(shape=(2,), inds=('i_0',), tags=set()),
])""",
)
cirq.testing.assert_repr_pretty(result, "cirq.MPSTrialResult(...)", cycle=True)
def test_empty_step_result():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
assert (
str(step_result)
== """0=0
TensorNetwork([
Tensor(shape=(2,), inds=('i_0',), tags=set()),
])"""
)
def test_step_result_repr_pretty():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
cirq.testing.assert_repr_pretty(
step_result,
"""0=0
TensorNetwork([
Tensor(shape=(2,), inds=('i_0',), tags=set()),
])""",
)
cirq.testing.assert_repr_pretty(step_result, "cirq.MPSSimulatorStepResult(...)", cycle=True)
def test_state_equal():
q0, q1 = cirq.LineQubit.range(2)
state0 = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1a = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1b = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1729.0, sum_prob_atol=1e-3),
)
assert state0 == state0
assert state0 != state1a
assert state1a != state1b
def test_supremacy_equal_more_rows():
circuit = supremacy_v2.generate_boixo_2018_supremacy_circuits_v2_grid(
n_rows=3, n_cols=2, cz_depth=3, seed=0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_supremacy_equal_more_cols():
circuit = supremacy_v2.generate_boixo_2018_supremacy_circuits_v2_grid(
n_rows=2, n_cols=3, cz_depth=3, seed=0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_tensor_index_names():
qubits = cirq.LineQubit.range(12)
qubit_map = {qubit: i for i, qubit in enumerate(qubits)}
state = ccq.mps_simulator.MPSState(
qubit_map,
prng=value.parse_random_state(0),
)
assert state.i_str(0) == "i_00"
assert state.i_str(11) == "i_11"
assert state.mu_str(0, 3) == "mu_0_3"
assert state.mu_str(3, 0) == "mu_0_3"
def test_supremacy_big():
circuit = supremacy_v2.generate_boixo_2018_supremacy_circuits_v2_grid(
n_rows=7, n_cols=7, cz_depth=6, seed=0
)
qubit_order = circuit.all_qubits()
q0 = next(iter(qubit_order))
circuit.append(cirq.measure(q0))
mps_simulator_1 = ccq.mps_simulator.MPSSimulator(
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=5e-5)
)
result_1 = mps_simulator_1.simulate(circuit, qubit_order=qubit_order, initial_state=0)
assert result_1.final_state.estimation_stats() == {
'estimated_fidelity': 0.997,
'memory_bytes': 11008,
'num_coefs_used': 688,
}
mps_simulator_2 = ccq.mps_simulator.MPSSimulator(
simulation_options=ccq.mps_simulator.MPSOptions(
method='isvd', max_bond=1, cutoff_mode='sum2'
)
)
result_2 = mps_simulator_2.simulate(circuit, qubit_order=qubit_order, initial_state=0)
assert result_2.final_state.estimation_stats() == {
'estimated_fidelity': 1.0,
'memory_bytes': 1568,
'num_coefs_used': 98,
}
def test_simulate_moment_steps_sample():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
simulator = ccq.mps_simulator.MPSSimulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
if i == 0:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
assert (
str(step)
== """TensorNetwork([
Tensor(shape=(2,), inds=('i_0',), tags=set()),
Tensor(shape=(2,), inds=('i_1',), tags=set()),
])"""
)
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, False]) or np.array_equal(
sample, [False, False]
)
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
else:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 0.0, 1.0 / math.sqrt(2)]),
)
assert (
str(step)
== """TensorNetwork([
Tensor(shape=(2, 2), inds=('i_0', 'mu_0_1'), tags=set()),
Tensor(shape=(2, 2), inds=('mu_0_1', 'i_1'), tags=set()),
])"""
)
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, True]) or np.array_equal(
sample, [False, False]
)
def test_sample_seed():
q = cirq.NamedQubit('q')
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(seed=1234)
result = simulator.run(circuit, repetitions=20)
measured = result.measurements['q']
result_string = ''.join(map(lambda x: str(int(x[0])), measured))
assert result_string == '01011001110111011011'
def test_run_no_repetitions():
q0 = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0))
result = simulator.run(circuit, repetitions=0)
assert len(result.measurements['0']) == 0
def test_run_parameters_not_resolved():
a = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('a'))(a), cirq.measure(a))
with pytest.raises(ValueError, match='symbols were not specified'):
_ = simulator.run_sweep(circuit, cirq.ParamResolver({}))
def test_deterministic_gate_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator1 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result1 = simulator1.run(circuit, repetitions=10)
simulator2 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result2 = simulator2.run(circuit, repetitions=10)
assert result1 == result2
simulator3 = ccq.mps_simulator.MPSSimulator(noise=cirq.Z)
result3 = simulator3.run(circuit, repetitions=10)
assert result1 != result3
def test_nondeterministic_mixture_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(
noise=cirq.ConstantQubitNoiseModel(cirq.depolarize(0.5))
)
result1 = simulator.run(circuit, repetitions=50)
result2 = simulator.run(circuit, repetitions=50)
assert result1 != result2
def test_unsupported_noise_fails():
with pytest.raises(ValueError, match='noise must be unitary or mixture but was'):
ccq.mps_simulator.MPSSimulator(noise=cirq.amplitude_damp(0.5))
def test_state_copy():
sim = ccq.mps_simulator.MPSSimulator()
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.H(q))
state_Ms = []
for step in sim.simulate_moment_steps(circuit):
state_Ms.append(step.state.M)
for x, y in itertools.combinations(state_Ms, 2):
assert len(x) == len(y)
for i in range(len(x)):
assert not np.shares_memory(x[i], y[i])
def test_state_act_on_args_initializer():
s = ccq.mps_simulator.MPSState(
qubits=(cirq.LineQubit(0),),
prng=np.random.RandomState(0),
log_of_measurement_results={'test': [4]},
)
assert s.qubits == (cirq.LineQubit(0),)
assert s.log_of_measurement_results == {'test': [4]}
def test_act_on_gate():
args = ccq.mps_simulator.MPSState(
qubits=cirq.LineQubit.range(3),
prng=np.random.RandomState(0),
log_of_measurement_results={},
)
cirq.act_on(cirq.X, args, [cirq.LineQubit(1)])
np.testing.assert_allclose(
args.state_vector().reshape((2, 2, 2)),
cirq.one_hot(index=(0, 1, 0), shape=(2, 2, 2), dtype=np.complex64),
)
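# Minimal usage sketch (illustrative only, not part of the test suite): running
# the MPS simulator directly on a small circuit, mirroring what the tests above
# exercise through assert_same_output_as_dense.
#
#   q0, q1 = cirq.LineQubit.range(2)
#   circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1), cirq.measure(q0, q1))
#   simulator = ccq.mps_simulator.MPSSimulator(seed=0)
#   result = simulator.run(circuit, repetitions=10)
#   print(result.measurements['0,1'])  # rows of correlated bits, e.g. [0 0] or [1 1]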
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import a TF v1-style SavedModel when executing eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import function_deserialization
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import signature_serialization
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
class _Initializer(tracking.CapturableResource):
"""Represents an initialization operation restored from a SavedModel.
Without this object re-export of imported 1.x SavedModels would omit the
original SavedModel's initialization procedure.
Created when `tf.saved_model.load` loads a TF 1.x-style SavedModel with an
initialization op. This object holds a function that runs the
initialization. It does not require any manual user intervention;
`tf.saved_model.save` will see this object and automatically add it to the
exported SavedModel, and `tf.saved_model.load` runs the initialization
function automatically.
"""
def __init__(self, init_fn, asset_paths):
super(_Initializer, self).__init__()
self._asset_paths = asset_paths
self._init_fn = init_fn
def _create_resource(self):
return array_ops.placeholder(
dtype=dtypes.resource, shape=[], name="unused_resource")
def _initialize(self):
return self._init_fn(*[path.asset_path for path in self._asset_paths])
class _EagerSavedModelLoader(loader_impl.SavedModelLoader):
"""Loads a SavedModel without using Sessions."""
def get_meta_graph_def_from_tags(self, tags):
"""Override to support implicit one-MetaGraph loading with tags=None."""
if tags is None:
if len(self._saved_model.meta_graphs) != 1:
tag_sets = [mg.meta_info_def.tags
for mg in self._saved_model.meta_graphs]
raise ValueError(
("Importing a SavedModel with tf.saved_model.load requires a "
"'tags=' argument if there is more than one MetaGraph. Got "
"'tags=None', but there are {} MetaGraphs in the SavedModel with "
"tag sets {}. Pass a 'tags=' argument to load this SavedModel.")
.format(len(self._saved_model.meta_graphs), tag_sets))
return self._saved_model.meta_graphs[0]
return super(_EagerSavedModelLoader, self).get_meta_graph_def_from_tags(
tags)
def load_graph(self, returns, meta_graph_def):
"""Called from wrap_function to import `meta_graph_def`."""
# pylint: disable=protected-access
saver, _ = tf_saver._import_meta_graph_with_return_elements(
meta_graph_def)
# pylint: enable=protected-access
returns[0] = saver
def _extract_saver_restore(self, wrapped, saver):
if saver is None:
return None
saver_def = saver.saver_def
filename_tensor = wrapped.graph.as_graph_element(
saver_def.filename_tensor_name)
# We both feed and fetch filename_tensor so we have an operation to use to
# feed into variable initializers (only relevant for v1 graph building).
return wrapped.prune(
feeds=[filename_tensor],
fetches=[filename_tensor,
wrapped.graph.as_graph_element(saver_def.restore_op_name)])
def restore_variables(self, wrapped, restore_from_saver):
"""Restores variables from the checkpoint."""
if restore_from_saver is not None:
initializer, _ = restore_from_saver(
constant_op.constant(self._variables_path))
if not ops.executing_eagerly_outside_functions():
# Add the initialization operation to the "saved_model_initializers"
# collection in case we don't have any lifted variables to attach it to.
ops.add_to_collection("saved_model_initializers", initializer)
one_unlifted = False
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES):
if variable.graph is wrapped.graph:
one_unlifted = True
# pylint: disable=protected-access
variable._initializer_op = initializer
# pylint: enable=protected-access
if one_unlifted:
logging.warning(
"Some variables could not be lifted out of a loaded function. "
"Please run "
"`sess.run(tf.get_collection(\"saved_model_initializers\"))` to "
"restore these variables.")
def _extract_signatures(self, wrapped, meta_graph_def):
"""Creates ConcreteFunctions for signatures in `meta_graph_def`."""
signature_functions = {}
for signature_key, signature_def in meta_graph_def.signature_def.items():
if signature_def.inputs:
input_items = sorted(
signature_def.inputs.items(), key=lambda item: item[1].name)
original_input_names, input_specs = zip(*input_items)
else:
original_input_names = []
input_specs = []
# TODO(allenl): Support optional arguments
feeds = [
wrap_function._get_element_from_tensor_info(input_spec, wrapped.graph) # pylint: disable=protected-access
for input_spec in input_specs
]
input_names = []
input_tensors = []
for original_input_name, feed in zip(original_input_names, feeds):
if isinstance(feed, sparse_tensor.SparseTensor):
# We have to give explicit name for SparseTensor arguments, because
# these are not present in the TensorInfo.
indices_name = "%s_indices" % original_input_name
values_name = "%s_values" % original_input_name
dense_shape_name = "%s_dense_shape" % original_input_name
input_names.extend([indices_name, values_name, dense_shape_name])
input_tensors.extend([feed.indices, feed.values, feed.dense_shape])
elif isinstance(feed, composite_tensor.CompositeTensor):
component_tensors = nest.flatten(feed, expand_composites=True)
input_names.extend("%s_component_%d" % (original_input_name, n)
for n in range(len(component_tensors)))
input_tensors.extend(component_tensors)
else:
input_names.append(original_input_name)
input_tensors.append(feed)
fetches = {name: out for name, out in signature_def.outputs.items()}
try:
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
except lift_to_graph.UnliftableError as ex:
# Mutate the exception to add a bit more detail.
args = ex.args
if not args:
message = ""
else:
message = args[0]
message = (
("A SavedModel signature needs an input for each placeholder the "
"signature's outputs use. An output for signature '{}' depends on "
"a placeholder which is not an input (i.e. the placeholder is not "
"fed a value).\n\n").format(signature_key)
+ message)
ex.args = (message,) + args[1:]
raise
# pylint: disable=protected-access
signature_fn._arg_keywords = input_names
signature_fn._func_graph.structured_input_signature = (
(),
func_graph.convert_structure_to_signature(
dict(zip(input_names, input_tensors))))
if len(input_names) == 1:
# Allowing positional arguments does not create any ambiguity if there's
# only one.
signature_fn._num_positional_args = 1
else:
signature_fn._num_positional_args = 0
# pylint: enable=protected-access
signature_functions[signature_key] = signature_fn
return signature_functions
def load(self, tags):
"""Creates an object from the MetaGraph identified by `tags`."""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
load_shared_name_suffix = "_load_{}".format(ops.uid())
functions = function_deserialization.load_function_def_library(
meta_graph_def.graph_def.library,
load_shared_name_suffix=load_shared_name_suffix)
# Replace existing functions in the MetaGraphDef with renamed functions so
# we don't have duplicates or name collisions.
meta_graph_def.graph_def.library.Clear()
for function in functions.values():
meta_graph_def.graph_def.library.function.add().CopyFrom(
function.function_def)
# We've renamed functions and shared names. We need the same operation on
# the GraphDef itself for consistency.
for node_def in meta_graph_def.graph_def.node:
function_deserialization.fix_node_def(node_def, functions,
load_shared_name_suffix)
load_graph_returns = [None]
wrapped = wrap_function.wrap_function(
functools.partial(self.load_graph, load_graph_returns, meta_graph_def),
signature=[])
saver, = load_graph_returns
restore_from_saver = self._extract_saver_restore(wrapped, saver)
self.restore_variables(wrapped, restore_from_saver)
with wrapped.graph.as_default():
init_op = loader_impl.get_init_op(
meta_graph_def) or monitored_session.Scaffold.default_local_init_op()
# Add a dummy Tensor we know we can fetch to add control dependencies to.
init_anchor = constant_op.constant(0., name="dummy_fetch")
root = tracking.AutoTrackable()
if restore_from_saver is not None:
root.restore = (
lambda path: restore_from_saver(constant_op.constant(path)))
asset_feed_tensors = []
asset_paths = []
for tensor_name, value in loader_impl.get_asset_tensors(
self._export_dir, meta_graph_def).items():
asset_feed_tensors.append(wrapped.graph.as_graph_element(tensor_name))
asset_paths.append(tracking.Asset(value))
init_fn = wrapped.prune(
feeds=asset_feed_tensors,
fetches=[init_anchor, wrapped.graph.as_graph_element(init_op)])
initializer = _Initializer(init_fn, asset_paths)
# pylint: disable=protected-access
local_init_op, _ = initializer._initialize()
# pylint: enable=protected-access
with ops.init_scope():
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, local_init_op)
for variable in wrapped.graph.get_collection_ref(
ops.GraphKeys.LOCAL_VARIABLES):
# pylint: disable=protected-access
variable._initializer_op = local_init_op
# pylint: enable=protected-access
root.initializer = initializer
root.asset_paths = asset_paths
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
root.signatures = signature_serialization.create_signature_map(
signature_functions)
root.variables = list(wrapped.graph.variables)
root.tensorflow_version = (
meta_graph_def.meta_info_def.tensorflow_version)
root.tensorflow_git_version = (
meta_graph_def.meta_info_def.tensorflow_git_version)
root.graph = wrapped.graph
root.prune = wrapped.prune
return root
def load(export_dir, tags):
"""Load a v1-style SavedModel as an object."""
loader = _EagerSavedModelLoader(export_dir)
return loader.load(tags=tags)
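# Example (a minimal sketch; the path and signature key are hypothetical):
# loading a TF 1.x SavedModel eagerly and calling one of its signatures,
# assuming `import tensorflow as tf` at the call site.
#
#   imported = load("/tmp/v1_saved_model", tags=["serve"])
#   serving_fn = imported.signatures["serving_default"]
#   outputs = serving_fn(tf.constant([[1.0, 2.0]]))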
|
|
# -*- coding: utf-8 -*-
'''
watdo.model
~~~~~~~~~~~
This module provides datastructures to represent and helper functions to
access data on the filesystem.
:copyright: (c) 2013 Markus Unterwaditzer
:license: MIT, see LICENSE for more details.
'''
import datetime
import os
from atomicwrites import atomic_write
import icalendar
import icalendar.tools
from ._compat import string_types, to_unicode
from .exceptions import CliError
class Task(object):
#: the absolute path to the directory containing all calendars
basepath = None
#: the calendar name
calendar = None
#: the task's file name
filename = None
#: old locations of the task that should be removed on write
_old_filepaths = None
#: the vcal object from the icalendar module (exposed through self.vcal)
_vcal = None
#: the VTODO object inside self._vcal (exposed through self.main)
_main = None
def __init__(self, **kwargs):
for k, v in kwargs.items(): # meh
setattr(self, k, v)
@property
def filepath(self):
if None in (self.basepath, self.calendar, self.filename):
return None
return os.path.join(self.basepath, self.calendar, self.filename)
@filepath.setter
def filepath(self, new):
if self._old_filepaths is None:
self._old_filepaths = set()
if self.filepath is not None:
self._old_filepaths.add(self.filepath)
self.basepath, self.calendar, self.filename = new.rsplit(u'/', 2)
@property
def vcal(self):
'''full file content, parsed (VCALENDAR)'''
if self._vcal is None:
self._vcal = dummy_vcal()
return self._vcal
@vcal.setter
def vcal(self, val):
if isinstance(val, string_types):
val = icalendar.Calendar.from_ical(val)
self._vcal = val
@property
def main(self):
'''the main object (VTODO, VEVENT)'''
if self._main is None:
for component in self.vcal.walk():
if component.name == 'VTODO':
self._main = component
break
return self._main
@main.setter
def main(self, val):
self._main = val
def write(self, create=False):
if self.filename is None:
if not create:
raise ValueError('Create arg must be true '
'if filename is None.')
self.random_filename()
if self.filepath is None:
raise ValueError('basepath and calendar must be set.')
calendar_path = os.path.join(self.basepath, self.calendar)
if not os.path.exists(calendar_path):
raise CliError('Calendars are not explicitly created. '
'Please create the directory {} yourself.'
.format(calendar_path))
with atomic_write(self.filepath, mode='wb', overwrite=not create) as f:
f.write(self.vcal.to_ical())
while self._old_filepaths:
os.remove(self._old_filepaths.pop())
def random_filename(self):
self.filename = self.main['uid'] + u'.ics'
def update(self, other):
self.due = other.due
self.summary = other.summary
self.description = other.description
self.status = other.status
self.calendar = other.calendar
def bump(self):
self.main.pop('last-modified', None)
self.main.add('last-modified', datetime.datetime.now())
@property
def due(self):
dt = self.main.get('due', None)
if dt is None:
return None
dt = dt.dt
if isinstance(dt, datetime.datetime):
dt = dt.replace(tzinfo=None)
return dt
@due.setter
def due(self, dt):
self.main.pop('due', None)
if dt is not None:
if isinstance(dt, string_types):
dt = to_unicode(dt)
self.main.add('due', dt)
@property
def summary(self):
return self.main.get('summary', u'')
@summary.setter
def summary(self, val):
self.main.pop('summary', None)
if val:
self.main['summary'] = to_unicode(val)
@property
def done(self):
return self.status in (u'COMPLETED', u'CANCELLED')
@property
def done_date(self):
dt = self.main.get('completed', None)
if dt is None:
return None
return dt.dt
@done_date.setter
def done_date(self, dt):
self.main.pop('completed', None)
if dt is not None:
if isinstance(dt, string_types):
dt = to_unicode(dt)
self.main.add('completed', dt)
@property
def description(self):
return self.main.get('description', u'')
@description.setter
def description(self, val):
self.main.pop('description', None)
if val:
self.main['description'] = to_unicode(val)
@property
def status(self):
x = self.main.get('status', u'NEEDS-ACTION')
return x if x != u'NEEDS-ACTION' else u''
@status.setter
def status(self, val):
self.main.pop('status', None)
if val:
self.main['status'] = to_unicode(val)
def __cmp__(self, x):
return 0 if self.__eq__(x) else -1
def __eq__(self, other):
return all((
isinstance(other, type(self)),
self.summary.rstrip(u'\n') == other.summary.rstrip(u'\n'),
self.description.rstrip(u'\n') == other.description.rstrip(u'\n'),
self.due == other.due,
self.status == other.status
))
def __repr__(self):
return 'watdo.model.Task({})'.format({
'description': self.description,
'summary': self.summary,
'due': self.due,
'status': self.status
})
def dummy_vcal():
cal = icalendar.Calendar()
cal.add('prodid', '-//watdo//mimedir.icalendar//EN')
cal.add('version', '2.0')
todo = icalendar.Todo()
todo['uid'] = icalendar.tools.UIDGenerator().uid(host_name='watdo')
cal.add_component(todo)
return cal
class ParsingError(ValueError):
pass
def walk_calendar(dirpath):
for filename in os.listdir(dirpath):
filepath = os.path.join(dirpath, filename)
if not os.path.isfile(filepath) or not filepath.endswith('.ics'):
continue
with open(filepath, 'rb') as f:
vcal = f.read()
try:
task = Task(vcal=vcal, filepath=filepath)
except Exception as e:
print('Error happened during parsing {}: {}'
.format(filepath, str(e)))
else:
if task.main is not None:
yield task
def walk_calendars(path):
'''Yield name of and absolute path to each available calendar.'''
for dirname in os.listdir(path):
dirpath = os.path.join(path, dirname)
if not os.path.isfile(dirpath):
for task in walk_calendar(dirpath):
yield task
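# Example (a minimal sketch; the basepath and calendar name are hypothetical and
# the calendar directory must already exist):
#
#   task = Task(basepath='/home/user/.watdo/tasks', calendar='default')
#   task.summary = 'Buy milk'
#   task.due = datetime.datetime(2013, 9, 1, 12, 0)
#   task.write(create=True)  # picks a filename derived from the generated UID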
|
|
import re
import codecs
import ConfigParser
import os
local_first_URL = u'G:/visual.merriam-webster.com/animal-kingdom/flying-mammal/examples-bats.php.htm'
# Note: the "/" here differs from the "\" used in the original web URL; mixing them up will break the local path.
local_current_URL = local_first_URL
local_next_URL = ''
filename = 'profile.ini'
word_counter = 0
progress = 1
cf=ConfigParser.ConfigParser()
if os.path.exists(filename):
cf.read(filename)
if 'mission' not in cf.sections():
cf.add_section('mission')
if 'progress' not in cf.options("mission"):
cf.set('mission','progress',progress)
else:
progress = cf.getint('mission','progress')
if 'url' not in cf.options("mission"):
cf.set('mission','url',local_first_URL)
else:
local_current_URL = cf.get('mission','url')
local_first_URL = local_current_URL
else:
cf.add_section('mission')
cf.set('mission','progress',progress)
cf.set('mission','url',local_first_URL)
section = progress
page_num = 7*section-6
page_end = 7*section
#print "page_num = " + str(page_num)
while True:
html_file = codecs.open(local_current_URL,'r','utf-8')
html_file_text = html_file.read()
##print html_file_text
#print "local_current_URL = " + local_current_URL
local_current_URL = local_current_URL.split('/')
#print "local_current_URL(1) = \n\n\n\n\n"
#print local_current_URL
local_current_URL.pop()
# re.sub's fourth positional argument is count, not flags, so pass flags explicitly
html_file_text = re.sub(r'\&\#160;', " ", html_file_text, flags=re.S)
html_file_text = re.sub(r'\&\#\d{4}', "", html_file_text, flags=re.S)
html_file_text = re.sub(r'\&\#\d{3}', "", html_file_text, flags=re.S)
decker = re.findall(r'index\.php(.*?)</div>', html_file_text, re.S)
decker01 = re.findall(r'>(.*?)</a>', decker[0], re.S)
del decker01[0]
subject = decker01[-1]
subject = re.sub(r':', "", subject, flags=re.S)
decker_name = str(page_num)+'-'+subject+".dec"
decker02 = "::".join(decker01)
#print "deck = " + decker02
#print "decker_name=" + decker_name
with open(decker_name, 'w') as decker_file:
decker_file.write(decker02)
#extract the picture path below
pic_path = re.findall(r'<div><img src=\"(.*?).jpg', html_file_text, re.S)
#print '*'*40
##print pic_path
#print local_current_URL
pic_path_temp = local_current_URL[:]
#print "pic_path_temp(2)="
#print pic_path_temp
temp_pic = pic_path[0].split('/')
temp_temp = []
##print temp
for n in range(len(temp_pic)):
if temp_pic[n]=='..':
pic_path_temp.pop()
else :
temp_temp.append(temp_pic[n])
temp_pic = temp_temp
temp_pic[-1] = temp_pic[-1] + ".jpg"
pic_path_temp.extend(temp_pic)
pic_path = "/".join(pic_path_temp)
#print pic_path
#print '*'*40
#print "local_current_URL = "
#print local_current_URL
#extract the text below
content = []
noun = ""
sentences = re.findall(r'descript(.*?)v>', html_file_text, re.S)
#print '*'*40
#print sentences
for n in range(len(sentences)):
line = []
#sentence = re.findall(r'h4(.*?)/di', sentences[n], re.S)
##print sentence
sentence = re.sub(r'><', '', sentences[n], flags=re.S)
#print "1"
#print sentence
sentence = re.sub(r'\&\#8217', "'", sentence, flags=re.S)
#print "2"
#print sentence
sentence = ' '.join(sentence.split())
#print "3"
#print sentence
sentence = re.sub(r'> <', '', sentence, flags=re.S)
#print "4"
#print sentence
sentence = re.findall(r'>(.*?)<', sentence, re.S)
#print "5"
#print sentence
if len(sentence)==3:
del sentence[-1]
#print "6"
#print sentence
#del sentence[0]
noun = sentence[0]
line.append(noun)
line.append(pic_path)
sentence = ":".join(sentence)
line.append(sentence)
line.append(sentence)
line = '\\'.join(line)
content.append(line)
word_counter = word_counter + 1
##print '*'*40
#for n in range(len(content)):
#print content[n]
with open(str(page_num)+'-'+subject+".txt","wb") as content_file:
content_file.write("\n".join(content).encode('utf-8'))
##print "content ="
##print content
content_file = str(page_num)+".txt"
local_next_URL = re.findall(r'href(.*?)>next<',html_file_text)
#print "local_next_URL = "
#print local_next_URL
##print html_file_text
#print "URL 1 = " + local_next_URL[0]
local_next_URL = re.findall(r'=\"(.*?)\"',local_next_URL[0])
#print "URL 2 =" + local_next_URL[0]
temp = local_next_URL[0].split('/')
temp_temp = []
#print "temp = "
#print temp
for n in range(len(temp)):
if temp[n]=='..':
local_current_URL.pop()
else :
temp_temp.append(temp[n])
temp = temp_temp
if temp[-1] == "examples-hooves.php.htm":
temp[-1] = "examples of hooves.php.htm"
#print "local_current_URL = "
#print local_current_URL
local_next_URL = local_current_URL
local_next_URL.extend(temp)
local_next_URL = "/".join(local_next_URL)
if local_next_URL == local_first_URL:
break
#print "local_next_URL = " + local_next_URL
local_current_URL = local_next_URL
if page_num == page_end:
break
page_num = page_num + 1
#print "page_num = " + str(page_num)
cf.set('mission','progress',progress+1)
cf.set('mission','URL',local_next_URL)
cf.write(open("profile.ini", "w"))
script=[]
with open("adjust.ahk",'r') as ahk_script:
script = ahk_script.readlines()
#print script
script[0] = 'counter = %s\n' % str(word_counter)
with open("adjust.ahk",'w') as ahk_script:
ahk_script.writelines(script)
|
|
import logging
import socket
import re
import gevent
from StringIO import StringIO
from six.moves import urllib_parse as urlparse
from cachebrowser.models import Host
from cachebrowser.network import ConnectionHandler
from cachebrowser.common import silent_fail
from cachebrowser import http
from cachebrowser import dns
class ProxyConnection(ConnectionHandler):
def __init__(self, *args, **kwargs):
super(ProxyConnection, self).__init__(*args, **kwargs)
self._buffer = StringIO()
self._schema = None
self._local_socket = self.socket
self._remote_socket = None
self.on_data = self.on_local_data
self.on_close = self.on_local_closed
def on_data(self, data):
self.on_local_data(data)
def on_connect(self):
pass
# logging.debug("New proxy connection established with %s" % str(self.address))
@silent_fail(log=True)
def on_local_data(self, data):
if len(data) == 0:
return
if self._schema is not None:
if hasattr(self._schema, 'on_local_data'):
return self._schema.on_local_data(data)
else:
self._buffer.write(data)
schema = self._check_for_schema()
if schema is not None:
self._schema = schema(self, self._buffer)
@silent_fail(log=True)
def on_remote_data(self, data):
if len(data) == 0:
return
if hasattr(self._schema, 'on_remote_data'):
return self._schema.on_remote_data(data)
@silent_fail(log=True)
def on_local_closed(self):
if self._remote_socket is None:
return
try:
self._remote_socket.close()
except socket.error:
pass
@silent_fail(log=True)
def on_remote_closed(self):
try:
self._local_socket.close()
except socket.error:
pass
def start_remote(self, sock):
self._remote_socket = sock
def remote_reader():
try:
while True:
buff = sock.recv(1024)
gevent.sleep(0)
if not buff:
self.on_remote_closed()
break
self.on_remote_data(buff)
except Exception as e:
logging.error(e)
gevent.spawn(remote_reader)
def send_remote(self, data):
if len(data) == 0:
return
self._remote_socket.send(data)
def send_local(self, data):
if len(data) == 0:
return
self._local_socket.send(data)
def close_local(self):
self._local_socket.close()
def close_remote(self):
self._remote_socket.close()
def _check_for_schema(self):
buff = self._buffer.getvalue()
if '\n' in buff:
self._buffer.seek(0)
firstline = self._buffer.readline()
match = re.match("(?:GET|POST|PUT|DELETE|HEAD) (.+) \w+", firstline)
if match is not None:
return HttpSchema
match = re.match("(?:CONNECT) (.+) \w*", firstline)
if match is not None:
return SSLSchema
return None
class HttpSchema(object):
def __init__(self, connection, buff=None):
self._connection = connection
self._upstream_started = False
self.request_builder = http.HttpRequest.Builder()
self.cachebrowsed = False
if buff is not None:
self.on_local_data(buff.getvalue())
def on_local_data(self, data):
if not self._upstream_started:
self.request_builder.write(data)
self._check_request()
def on_remote_data(self, data):
if self.cachebrowsed and 'Location: ' in data:
data = data.replace('Location: https', 'Location: http')
self._connection.send_local(data)
def _check_request(self):
if not self.request_builder.is_ready():
return
self._upstream_started = True
self._start_remote()
def _start_remote(self):
http_request = self.request_builder.http_request
url = http_request.path
parsed_url = urlparse.urlparse(url)
try:
host = Host.get(url=parsed_url.hostname)
if host.ssl:
url = url.replace('http', 'https')
self.cachebrowsed = True
except Host.DoesNotExist:
pass
logging.info("[%s] %s %s" % (http_request.method, url, '<CACHEBROWSED>' if self.cachebrowsed else ''))
request = http_request.get_raw()
# request = re.sub(r'^(GET|POST|PUT|DELETE|HEAD) http[s]?://[^/]+/(.+) (\w+)', r'\1 /\2 \3', request)
response = http.request(url, raw_request=request)
self._connection.start_remote(response)
class SSLSchema(object):
def __init__(self, connection, buff=None):
self.connection = connection
self._buffer = buff or StringIO()
self._upstream_started = False
self._host = None
self._start_upstream()
# connection.close_local()
self.connection = connection
def on_local_data(self, data):
if not self._upstream_started and not self._start_upstream():
self._buffer.seek(0, 2)
self._buffer.write(data)
return
else:
self.connection.send_remote(data)
def on_remote_data(self, data):
self.connection.send_local(data)
def _start_upstream(self):
self._buffer.seek(0)
firstline = self._buffer.readline()
match = re.match("(?:CONNECT) ([^:]+)(?:[:](\d+))? \w+", firstline)
if match is None:
return
host = match.group(1)
port = int(match.group(2) or 443)
cachebrowsed = False
try:
Host.get(url=host)
cachebrowsed = True
except Host.DoesNotExist:
pass
if cachebrowsed:
logging.info("[HTTPS] %s:%s <REJECTED>" % (host, port))
self.connection.close_local()
else:
logging.info("[HTTPS] %s:%s <PROXYING>" % (host, port))
return self._connect_upstream(host, port)
def _connect_upstream(self, host, port):
ip, cachebrowsed = dns.resolve_host(host)
if not ip:
return
# Return response to client
self.connection.send_local("HTTP/1.1 200 OK\r\n\r\n")
# Create remote socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Don't wrap this socket in SSL; the TLS stream from the client is forwarded as-is
# sock = ssl.wrap_socket(sock)
sock.connect((ip, port))
self._upstream_started = True
self._host = host
# ! Ref to connection._buffer not updated
self._buffer = StringIO()
# !! Why does this line not work here?
# self.connection.send_local("HTTP/1.1 200 OK\r\n\r\n")
self.connection.start_remote(sock)
return True
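# Illustration (comments only): _check_for_schema dispatches on the first
# request line received from the client, e.g.
#   "GET http://example.com/index.html HTTP/1.1"  -> HttpSchema (plain HTTP, possibly cachebrowsed)
#   "CONNECT example.com:443 HTTP/1.1"            -> SSLSchema (tunnelled TLS)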
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import warnings
from json import JSONDecodeError
from typing import Dict, Optional, Union
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlparse
from sqlalchemy import Boolean, Column, Integer, String, Text
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import reconstructor, synonym
from airflow.configuration import ensure_secrets_loaded
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.providers_manager import ProvidersManager
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.secrets_masker import mask_secret
from airflow.utils.module_loading import import_string
log = logging.getLogger(__name__)
def parse_netloc_to_hostname(*args, **kwargs):
"""This method is deprecated."""
warnings.warn("This method is deprecated.", DeprecationWarning)
return _parse_netloc_to_hostname(*args, **kwargs)
# Python automatically converts all letters to lowercase in hostname
# See: https://issues.apache.org/jira/browse/AIRFLOW-3615
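# For example, _parse_netloc_to_hostname(urlparse("postgresql://user:pass@Example.COM:5432/db"))
# returns "example.com": urlparse already lowercases the hostname and strips the
# userinfo and port.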
def _parse_netloc_to_hostname(uri_parts):
"""Parse a URI string to get correct Hostname."""
hostname = unquote(uri_parts.hostname or '')
if '/' in hostname:
hostname = uri_parts.netloc
if "@" in hostname:
hostname = hostname.rsplit("@", 1)[1]
if ":" in hostname:
hostname = hostname.split(":", 1)[0]
hostname = unquote(hostname)
return hostname
class Connection(Base, LoggingMixin):
"""
Placeholder to store information about different database instances
connection information. The idea here is that scripts use references to
database instances (conn_id) instead of hard coding hostname, logins and
passwords when using operators or hooks.
.. seealso::
For more information on how to use this class, see: :doc:`/howto/connection`
:param conn_id: The connection ID.
:param conn_type: The connection type.
:param description: The connection description.
:param host: The host.
:param login: The login.
:param password: The password.
:param schema: The schema.
:param port: The port number.
:param extra: Extra metadata. Non-standard data such as private/SSH keys can be saved here. JSON
encoded object.
:param uri: URI address describing connection parameters.
"""
EXTRA_KEY = '__extra__'
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN), unique=True, nullable=False)
conn_type = Column(String(500), nullable=False)
description = Column(Text(5000))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
_password = Column('password', String(5000))
port = Column(Integer())
is_encrypted = Column(Boolean, unique=False, default=False)
is_extra_encrypted = Column(Boolean, unique=False, default=False)
_extra = Column('extra', Text())
def __init__(
self,
conn_id: Optional[str] = None,
conn_type: Optional[str] = None,
description: Optional[str] = None,
host: Optional[str] = None,
login: Optional[str] = None,
password: Optional[str] = None,
schema: Optional[str] = None,
port: Optional[int] = None,
extra: Optional[Union[str, dict]] = None,
uri: Optional[str] = None,
):
super().__init__()
self.conn_id = conn_id
self.description = description
if extra and not isinstance(extra, str):
extra = json.dumps(extra)
if uri and (conn_type or host or login or password or schema or port or extra):
raise AirflowException(
"You must create an object using the URI or individual values "
"(conn_type, host, login, password, schema, port or extra)."
"You can't mix these two ways to create this object."
)
if uri:
self._parse_from_uri(uri)
else:
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
self.extra = extra
if self.extra:
self._validate_extra(self.extra, self.conn_id)
if self.password:
mask_secret(self.password)
@staticmethod
def _validate_extra(extra, conn_id) -> None:
"""
Here we verify that ``extra`` is a JSON-encoded Python dict. From Airflow 3.0, we should no
longer suppress these errors but raise instead.
"""
if extra is None:
return None
try:
extra_parsed = json.loads(extra)
if not isinstance(extra_parsed, dict):
warnings.warn(
"Encountered JSON value in `extra` which does not parse as a dictionary in "
f"connection {conn_id!r}. From Airflow 3.0, the `extra` field must contain a JSON "
"representation of a Python dict.",
DeprecationWarning,
stacklevel=3,
)
except json.JSONDecodeError:
warnings.warn(
f"Encountered non-JSON in `extra` field for connection {conn_id!r}. Support for "
"non-JSON `extra` will be removed in Airflow 3.0",
DeprecationWarning,
stacklevel=2,
)
return None
@reconstructor
def on_db_load(self):
if self.password:
mask_secret(self.password)
def parse_from_uri(self, **uri):
"""This method is deprecated. Please use uri parameter in constructor."""
warnings.warn(
"This method is deprecated. Please use uri parameter in constructor.", DeprecationWarning
)
self._parse_from_uri(**uri)
def _parse_from_uri(self, uri: str):
uri_parts = urlparse(uri)
conn_type = uri_parts.scheme
if conn_type == 'postgresql':
conn_type = 'postgres'
elif '-' in conn_type:
conn_type = conn_type.replace('-', '_')
self.conn_type = conn_type
self.host = _parse_netloc_to_hostname(uri_parts)
quoted_schema = uri_parts.path[1:]
self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
self.login = unquote(uri_parts.username) if uri_parts.username else uri_parts.username
self.password = unquote(uri_parts.password) if uri_parts.password else uri_parts.password
self.port = uri_parts.port
if uri_parts.query:
query = dict(parse_qsl(uri_parts.query, keep_blank_values=True))
if self.EXTRA_KEY in query:
self.extra = query[self.EXTRA_KEY]
else:
self.extra = json.dumps(query)
def get_uri(self) -> str:
"""Return connection in URI format"""
if '_' in self.conn_type:
self.log.warning(
f"Connection schemes (type: {str(self.conn_type)}) "
f"shall not contain '_' according to RFC3986."
)
uri = f"{str(self.conn_type).lower().replace('_', '-')}://"
authority_block = ''
if self.login is not None:
authority_block += quote(self.login, safe='')
if self.password is not None:
authority_block += ':' + quote(self.password, safe='')
if authority_block:
authority_block += '@'
uri += authority_block
host_block = ''
if self.host:
host_block += quote(self.host, safe='')
if self.port:
if host_block:
host_block += f':{self.port}'
else:
host_block += f'@:{self.port}'
if self.schema:
host_block += f"/{quote(self.schema, safe='')}"
uri += host_block
if self.extra:
try:
query: Optional[str] = urlencode(self.extra_dejson)
except TypeError:
query = None
if query and self.extra_dejson == dict(parse_qsl(query, keep_blank_values=True)):
uri += '?' + query
else:
uri += '?' + urlencode({self.EXTRA_KEY: self.extra})
return uri
def get_password(self) -> Optional[str]:
"""Return encrypted password."""
if self._password and self.is_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
f"Can't decrypt encrypted password for login={self.login} "
f"FERNET_KEY configuration is missing"
)
return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
else:
return self._password
def set_password(self, value: Optional[str]):
"""Encrypt password and set in object attribute."""
if value:
fernet = get_fernet()
self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def password(cls):
"""Password. The value is decrypted/encrypted when reading/setting the value."""
return synonym('_password', descriptor=property(cls.get_password, cls.set_password))
def get_extra(self) -> Dict:
"""Return encrypted extra-data."""
if self._extra and self.is_extra_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
f"Can't decrypt `extra` params for login={self.login}, "
f"FERNET_KEY configuration is missing"
)
extra_val = fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
else:
extra_val = self._extra
if extra_val:
self._validate_extra(extra_val, self.conn_id)
return extra_val
def set_extra(self, value: str):
"""Encrypt extra-data and save in object attribute to object."""
if value:
fernet = get_fernet()
self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
self._validate_extra(self._extra, self.conn_id)
self.is_extra_encrypted = fernet.is_encrypted
else:
self._extra = value
self.is_extra_encrypted = False
@declared_attr
def extra(cls):
"""Extra data. The value is decrypted/encrypted when reading/setting the value."""
return synonym('_extra', descriptor=property(cls.get_extra, cls.set_extra))
def rotate_fernet_key(self):
"""Encrypts data with a new key. See: :ref:`security/fernet`"""
fernet = get_fernet()
if self._password and self.is_encrypted:
self._password = fernet.rotate(self._password.encode('utf-8')).decode()
if self._extra and self.is_extra_encrypted:
self._extra = fernet.rotate(self._extra.encode('utf-8')).decode()
def get_hook(self, *, hook_params=None):
"""Return hook based on conn_type"""
hook = ProvidersManager().hooks.get(self.conn_type, None)
if hook is None:
raise AirflowException(f'Unknown hook type "{self.conn_type}"')
try:
hook_class = import_string(hook.hook_class_name)
except ImportError:
# warnings.warn does not do lazy %-formatting (its second argument is the
# warning category), so format the message up front.
warnings.warn(
f"Could not import {hook.hook_class_name} when discovering "
f"{hook.hook_name} {hook.package_name}"
)
raise
if hook_params is None:
hook_params = {}
return hook_class(**{hook.connection_id_attribute_name: self.conn_id}, **hook_params)
def __repr__(self):
return self.conn_id
def log_info(self):
"""
This method is deprecated. You can read each field individually or use the
default representation (`__repr__`).
"""
warnings.warn(
"This method is deprecated. You can read each field individually or "
"use the default representation (__repr__).",
DeprecationWarning,
stacklevel=2,
)
return (
f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
f"extra: {'XXXXXXXX' if self.extra_dejson else None}"
)
def debug_info(self):
"""
This method is deprecated. You can read each field individually or use the
default representation (`__repr__`).
"""
warnings.warn(
"This method is deprecated. You can read each field individually or "
"use the default representation (__repr__).",
DeprecationWarning,
stacklevel=2,
)
return (
f"id: {self.conn_id}. Host: {self.host}, Port: {self.port}, Schema: {self.schema}, "
f"Login: {self.login}, Password: {'XXXXXXXX' if self.password else None}, "
f"extra: {self.extra_dejson}"
)
def test_connection(self):
"""Calls out get_hook method and executes test_connection method on that."""
status, message = False, ''
try:
hook = self.get_hook()
if getattr(hook, 'test_connection', False):
status, message = hook.test_connection()
else:
message = (
f"Hook {hook.__class__.__name__} doesn't implement or inherit test_connection method"
)
except Exception as e:
message = str(e)
return status, message
@property
def extra_dejson(self) -> Dict:
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except JSONDecodeError:
self.log.exception("Failed parsing the json for conn_id %s", self.conn_id)
# Mask sensitive keys from this list
mask_secret(obj)
return obj
@classmethod
def get_connection_from_secrets(cls, conn_id: str) -> 'Connection':
"""
Get connection by conn_id.
:param conn_id: connection id
:return: connection
"""
for secrets_backend in ensure_secrets_loaded():
try:
conn = secrets_backend.get_connection(conn_id=conn_id)
if conn:
return conn
except Exception:
log.exception(
'Unable to retrieve connection from secrets backend (%s). '
'Checking subsequent secrets backend.',
type(secrets_backend).__name__,
)
raise AirflowNotFoundException(f"The conn_id `{conn_id}` isn't defined")
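# Example (a minimal sketch; the connection values are illustrative only):
# building a Connection from a URI and reading it back.
#
#   conn = Connection(
#       conn_id="my_postgres",
#       uri="postgresql://user:pass@db.example.com:5432/mydb?sslmode=require",
#   )
#   assert conn.conn_type == "postgres"
#   assert conn.schema == "mydb"
#   assert conn.extra_dejson == {"sslmode": "require"}
#   conn.get_uri()  # "postgres://user:pass@db.example.com:5432/mydb?sslmode=require"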
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'IpTcp.Directory' : {
'meta_info' : _MetaInfoClass('IpTcp.Directory',
False,
[
_MetaInfoClassMember('directoryname', ATTRIBUTE, 'str' , None, None,
[], [],
''' Directory name
''',
'directoryname',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('max-debug-files', ATTRIBUTE, 'int' , None, None,
[(1, 10000)], [],
''' Set number of Debug files
''',
'max_debug_files',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('max-file-size-files', ATTRIBUTE, 'int' , None, None,
[(1024, 4294967295)], [],
''' Set size of debug files in bytes
''',
'max_file_size_files',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'directory',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'IpTcp.Throttle' : {
'meta_info' : _MetaInfoClass('IpTcp.Throttle',
False,
[
_MetaInfoClassMember('tcpmaxthrottle', ATTRIBUTE, 'int' , None, None,
[(0, 100)], [],
''' Max throttle
''',
'tcpmaxthrottle',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('tcpmin-throttle', ATTRIBUTE, 'int' , None, None,
[(0, 100)], [],
''' Min throttle
''',
'tcpmin_throttle',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'throttle',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'IpTcp.NumThread' : {
'meta_info' : _MetaInfoClass('IpTcp.NumThread',
False,
[
_MetaInfoClassMember('tcp-in-q-threads', ATTRIBUTE, 'int' , None, None,
[(1, 16)], [],
''' InQ Threads
''',
'tcp_in_q_threads',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('tcp-out-q-threads', ATTRIBUTE, 'int' , None, None,
[(1, 16)], [],
''' OutQ Threads
''',
'tcp_out_q_threads',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'num-thread',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'IpTcp' : {
'meta_info' : _MetaInfoClass('IpTcp',
False,
[
_MetaInfoClassMember('accept-rate', ATTRIBUTE, 'int' , None, None,
[(1, 1000)], [],
''' TCP connection accept rate
''',
'accept_rate',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('directory', REFERENCE_CLASS, 'Directory' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'IpTcp.Directory',
[], [],
''' TCP directory details
''',
'directory',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('maximum-segment-size', ATTRIBUTE, 'int' , None, None,
[(68, 10000)], [],
''' TCP initial maximum segment size
''',
'maximum_segment_size',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('num-thread', REFERENCE_CLASS, 'NumThread' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'IpTcp.NumThread',
[], [],
''' TCP InQueue and OutQueue threads
''',
'num_thread',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('path-mtu-discovery', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Aging time; 0 for infinite, and range be (10,30)
''',
'path_mtu_discovery',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('receive-q', ATTRIBUTE, 'int' , None, None,
[(40, 800)], [],
''' TCP receive Queue Size
''',
'receive_q',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('selective-ack', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable TCP selective-ACK
''',
'selective_ack',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('syn-wait-time', ATTRIBUTE, 'int' , None, None,
[(5, 30)], [],
''' Time to wait on new TCP connections in seconds
''',
'syn_wait_time',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('throttle', REFERENCE_CLASS, 'Throttle' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'IpTcp.Throttle',
[], [],
''' Throttle TCP receive buffer (in percentage)
''',
'throttle',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('timestamp', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable TCP timestamp option
''',
'timestamp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('window-size', ATTRIBUTE, 'int' , None, None,
[(2048, 65535)], [],
''' TCP receive window size (bytes)
''',
'window_size',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ip-tcp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers',
False,
[
_MetaInfoClassMember('access-control-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_control_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('small-server', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Set number of allowable TCP small servers,
specify 0 for no-limit
''',
'small_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tcp-small-servers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers',
False,
[
_MetaInfoClassMember('access-control-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Specify the access list
''',
'access_control_list_name',
'Cisco-IOS-XR-ip-udp-cfg', False),
_MetaInfoClassMember('small-server', ATTRIBUTE, 'int' , None, None,
[(0, 2147483647)], [],
''' Set number of allowable small servers, specify
0 for no-limit
''',
'small_server',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-udp-cfg',
'udp-small-servers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-udp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv4.SmallServers' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv4.SmallServers',
False,
[
_MetaInfoClassMember('tcp-small-servers', REFERENCE_CLASS, 'TcpSmallServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers',
[], [],
''' Describing TCP related IPV4 and IPV6 small
servers
''',
'tcp_small_servers',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('udp-small-servers', REFERENCE_CLASS, 'UdpSmallServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers',
[], [],
''' UDP small servers configuration
''',
'udp_small_servers',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'small-servers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv4' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv4',
False,
[
_MetaInfoClassMember('small-servers', REFERENCE_CLASS, 'SmallServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv4.SmallServers',
[], [],
''' Describing IPV4 and IPV6 small servers
''',
'small_servers',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('maximum-server', ATTRIBUTE, 'int' , None, None,
[(1, 100)], [],
''' Set number of allowable servers
''',
'maximum_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tcp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet',
False,
[
_MetaInfoClassMember('tcp', REFERENCE_CLASS, 'Tcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp',
[], [],
''' TCP details
''',
'tcp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'telnet',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('dscp-value', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Set IP DSCP (DiffServ CodePoint) for TFTP
Server Packets
''',
'dscp_value',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('home-directory', ATTRIBUTE, 'str' , None, None,
[], [],
''' Specify device name where file is read from
(e.g. flash:)
''',
'home_directory',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('maximum-server', ATTRIBUTE, 'int' , None, None,
[(0, 2147483647)], [],
''' Set number of allowable servers, 0 for
no-limit
''',
'maximum_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp',
False,
[
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp',
[], [],
''' UDP details
''',
'udp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tftp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv6',
False,
[
_MetaInfoClassMember('telnet', REFERENCE_CLASS, 'Telnet' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet',
[], [],
''' TELNET server configuration commands
''',
'telnet',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('tftp', REFERENCE_CLASS, 'Tftp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp',
[], [],
''' TFTP server configuration commands
''',
'tftp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('maximum-server', ATTRIBUTE, 'int' , None, None,
[(1, 100)], [],
''' Set number of allowable servers
''',
'maximum_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tcp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet',
False,
[
_MetaInfoClassMember('tcp', REFERENCE_CLASS, 'Tcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp',
[], [],
''' TCP details
''',
'tcp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'telnet',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp.Udp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp.Udp',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('dscp-value', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Set IP DSCP (DiffServ CodePoint) for TFTP
Server Packets
''',
'dscp_value',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('home-directory', ATTRIBUTE, 'str' , None, None,
[], [],
''' Specify device name where file is read from
(e.g. flash:)
''',
'home_directory',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('maximum-server', ATTRIBUTE, 'int' , None, None,
[(0, 2147483647)], [],
''' Set number of allowable servers, 0 for
no-limit
''',
'maximum_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp',
False,
[
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp.Udp',
[], [],
''' UDP details
''',
'udp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tftp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf.Ipv4',
False,
[
_MetaInfoClassMember('telnet', REFERENCE_CLASS, 'Telnet' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet',
[], [],
''' TELNET server configuration commands
''',
'telnet',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('tftp', REFERENCE_CLASS, 'Tftp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp',
[], [],
''' TFTP server configuration commands
''',
'tftp',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs.Vrf' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the VRF instance
''',
'vrf_name',
'Cisco-IOS-XR-ip-tcp-cfg', True),
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv4',
[], [],
''' IPV4 related services
''',
'ipv4',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf.Ipv6',
[], [],
''' IPV6 related services
''',
'ipv6',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Vrfs' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Vrfs',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs.Vrf',
[], [],
''' VRF specific data
''',
'vrf',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv6.SmallServers.TcpSmallServers' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv6.SmallServers.TcpSmallServers',
False,
[
_MetaInfoClassMember('access-control-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list
''',
'access_control_list_name',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('small-server', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Set number of allowable TCP small servers,
specify 0 for no-limit
''',
'small_server',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'tcp-small-servers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv6.SmallServers' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv6.SmallServers',
False,
[
_MetaInfoClassMember('tcp-small-servers', REFERENCE_CLASS, 'TcpSmallServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv6.SmallServers.TcpSmallServers',
[], [],
''' Describing TCP related IPV4 and IPV6 small
servers
''',
'tcp_small_servers',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'small-servers',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services.Ipv6' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services.Ipv6',
False,
[
_MetaInfoClassMember('small-servers', REFERENCE_CLASS, 'SmallServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv6.SmallServers',
[], [],
''' Describing IPV4 and IPV6 small servers
''',
'small_servers',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd.Services' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd.Services',
False,
[
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv4',
[], [],
''' IPV4 related services
''',
'ipv4',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Ipv6',
[], [],
''' IPV6 related services
''',
'ipv6',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('vrfs', REFERENCE_CLASS, 'Vrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services.Vrfs',
[], [],
''' VRF table
''',
'vrfs',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'services',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.Cinetd' : {
'meta_info' : _MetaInfoClass('Ip.Cinetd',
False,
[
_MetaInfoClassMember('services', REFERENCE_CLASS, 'Services' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd.Services',
[], [],
''' Describing services of cinetd
''',
'services',
'Cisco-IOS-XR-ip-tcp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'cinetd',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.ForwardProtocol.Udp.Ports.Port' : {
'meta_info' : _MetaInfoClass('Ip.ForwardProtocol.Udp.Ports.Port',
False,
[
_MetaInfoClassMember('port-id', ATTRIBUTE, 'int' , None, None,
[(1, 65535)], [],
''' Port number
''',
'port_id',
'Cisco-IOS-XR-ip-udp-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Specify 'false' to disable well-known ports
Domain (53), TFTP (69), NameServer (42),
TACACS (49), NetBiosNameService (137), or
NetBiosDatagramService (138). Specify
'true' to enable non well-known ports.
''',
'enable',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-udp-cfg',
'port',
_yang_ns._namespaces['Cisco-IOS-XR-ip-udp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.ForwardProtocol.Udp.Ports' : {
'meta_info' : _MetaInfoClass('Ip.ForwardProtocol.Udp.Ports',
False,
[
_MetaInfoClassMember('port', REFERENCE_LIST, 'Port' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.ForwardProtocol.Udp.Ports.Port',
[], [],
''' Well-known ports are enabled by default and
non well-known ports are disabled by default.
It is not allowed to configure the default.
''',
'port',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-udp-cfg',
'ports',
_yang_ns._namespaces['Cisco-IOS-XR-ip-udp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.ForwardProtocol.Udp' : {
'meta_info' : _MetaInfoClass('Ip.ForwardProtocol.Udp',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable IP Forward Protocol UDP
''',
'disable',
'Cisco-IOS-XR-ip-udp-cfg', False),
_MetaInfoClassMember('ports', REFERENCE_CLASS, 'Ports' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.ForwardProtocol.Udp.Ports',
[], [],
''' Port configuration
''',
'ports',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-udp-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-ip-udp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip.ForwardProtocol' : {
'meta_info' : _MetaInfoClass('Ip.ForwardProtocol',
False,
[
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.ForwardProtocol.Udp',
[], [],
''' Packets to a specific UDP port
''',
'udp',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-udp-cfg',
'forward-protocol',
_yang_ns._namespaces['Cisco-IOS-XR-ip-udp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
'Ip' : {
'meta_info' : _MetaInfoClass('Ip',
False,
[
_MetaInfoClassMember('cinetd', REFERENCE_CLASS, 'Cinetd' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.Cinetd',
[], [],
''' Cinetd configuration data
''',
'cinetd',
'Cisco-IOS-XR-ip-tcp-cfg', False),
_MetaInfoClassMember('forward-protocol', REFERENCE_CLASS, 'ForwardProtocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg', 'Ip.ForwardProtocol',
[], [],
''' Controls forwarding of physical and directed IP
broadcasts
''',
'forward_protocol',
'Cisco-IOS-XR-ip-udp-cfg', False),
],
'Cisco-IOS-XR-ip-tcp-cfg',
'ip',
_yang_ns._namespaces['Cisco-IOS-XR-ip-tcp-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_tcp_cfg'
),
},
}
_meta_table['IpTcp.Directory']['meta_info'].parent =_meta_table['IpTcp']['meta_info']
_meta_table['IpTcp.Throttle']['meta_info'].parent =_meta_table['IpTcp']['meta_info']
_meta_table['IpTcp.NumThread']['meta_info'].parent =_meta_table['IpTcp']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv4.SmallServers.TcpSmallServers']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Ipv4.SmallServers']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv4.SmallServers.UdpSmallServers']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Ipv4.SmallServers']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv4.SmallServers']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Ipv4']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet.Tcp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp.Udp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Telnet']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6.Tftp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet.Tcp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp.Udp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Telnet']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4.Tftp']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv6']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf.Ipv4']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs.Vrf']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs.Vrf']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Vrfs']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv6.SmallServers.TcpSmallServers']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Ipv6.SmallServers']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv6.SmallServers']['meta_info'].parent =_meta_table['Ip.Cinetd.Services.Ipv6']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv4']['meta_info'].parent =_meta_table['Ip.Cinetd.Services']['meta_info']
_meta_table['Ip.Cinetd.Services.Vrfs']['meta_info'].parent =_meta_table['Ip.Cinetd.Services']['meta_info']
_meta_table['Ip.Cinetd.Services.Ipv6']['meta_info'].parent =_meta_table['Ip.Cinetd.Services']['meta_info']
_meta_table['Ip.Cinetd.Services']['meta_info'].parent =_meta_table['Ip.Cinetd']['meta_info']
_meta_table['Ip.ForwardProtocol.Udp.Ports.Port']['meta_info'].parent =_meta_table['Ip.ForwardProtocol.Udp.Ports']['meta_info']
_meta_table['Ip.ForwardProtocol.Udp.Ports']['meta_info'].parent =_meta_table['Ip.ForwardProtocol.Udp']['meta_info']
_meta_table['Ip.ForwardProtocol.Udp']['meta_info'].parent =_meta_table['Ip.ForwardProtocol']['meta_info']
_meta_table['Ip.Cinetd']['meta_info'].parent =_meta_table['Ip']['meta_info']
_meta_table['Ip.ForwardProtocol']['meta_info'].parent =_meta_table['Ip']['meta_info']
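# The parent links above attach each nested container's meta_info to its
# enclosing class; presumably this lets the YDK runtime resolve the full
# YANG path of a node from its meta entry alone.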
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to plot tasks from profiling data.
This script requires the matplotlib and numpy Python modules.
"""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import glob
import os
import sys
import numpy # pylint: disable=import-error
from matplotlib import pyplot # pylint: disable=import-error
class TaskMeasurements(object):
"""Measurements of a task.
Attributes:
completed_time (float): time when the task was completed by the foreman.
created_time (float): time when the task was created by the foreman.
merging_duration (float): time it took the foreman to merge the task.
merging_time (float): time when the task was started to be merged by
the foreman.
pending_merge_time (float): time when the task was scheduled to be
merged by the foreman.
processed_time (float): time when the task was processed according to
the foreman.
processing_duration (float): time it took the worker to process the task.
processing_time (float): time when the task started to be processed by
the worker.
scheduled_time (float): time when the task was scheduled onto the task
queue by the foreman.
"""
def __init__(self):
"""Initializes a task measurement."""
super(TaskMeasurements, self).__init__()
self.completed_time = None
self.created_time = None
self.merging_duration = None
self.merging_time = None
self.pending_merge_time = None
self.processed_time = None
self.processing_duration = None
self.processing_time = None
self.scheduled_time = None
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Plots task status durations from profiling data.'))
argument_parser.add_argument(
'--output', dest='output_file', type=str, help=(
'path of the output file to write the graph to instead of using '
'interactive mode. The output format is deduced from the extension '
'of the filename.'))
argument_parser.add_argument(
'profile_path', type=str, help=(
'path to the directory containing the profiling data.'))
options = argument_parser.parse_args()
if not os.path.isdir(options.profile_path):
print('No such directory: {0:s}'.format(options.profile_path))
return False
names = ['time', 'identifier', 'status']
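# Each tasks-*.csv.gz file is expected to be a tab-separated log with a
# header row and one (time, identifier, status) row per task status change;
# this matches the genfromtxt() call below.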
measurements = {}
glob_expression = os.path.join(options.profile_path, 'tasks-*.csv.gz')
for csv_file_name in glob.glob(glob_expression):
data = numpy.genfromtxt(
csv_file_name, delimiter='\t', dtype=None, encoding='utf-8',
names=names, skip_header=1)
label = os.path.basename(csv_file_name)
label = label.replace('tasks-', '').replace('.csv.gz', '')
for time, identifier, status in data:
if identifier not in measurements:
measurements[identifier] = TaskMeasurements()
task_measurement = measurements[identifier]
if status == 'completed':
task_measurement.completed_time = time
task_measurement.merging_duration = time - task_measurement.merging_time
elif status == 'created':
task_measurement.created_time = time
# TODO: add support for:
# elif status == 'merge_on_hold':
# elif status == 'merge_resumed':
elif status == 'merge_started':
task_measurement.merging_time = time
elif status == 'pending_merge':
task_measurement.pending_merge_time = time
elif status == 'processed':
task_measurement.processed_time = time
elif status == 'processing_started':
task_measurement.processing_time = time
elif status == 'processing_completed':
task_measurement.processing_duration = (
time - task_measurement.processing_time)
elif status == 'scheduled':
task_measurement.scheduled_time = time
before_pending_merge_duration = {}
before_queued_duration = {}
merging_duration = {}
pending_merge_duration = {}
processing_duration = {}
queued_duration = {}
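# Each dictionary below maps a timestamp (x axis) to a duration (y axis):
# before_queued: created -> scheduled
# queued: scheduled -> processing started
# processing: time spent by the worker
# before_pending_merge: processing completed -> pending merge
# pending_merge: pending merge -> merge started
# merging: merge started -> completed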
for identifier, task_measurement in measurements.items():
before_pending_merge_duration[task_measurement.scheduled_time] = (
task_measurement.pending_merge_time - (
task_measurement.processing_time +
task_measurement.processing_duration))
before_queued_duration[task_measurement.scheduled_time] = (
task_measurement.scheduled_time - task_measurement.created_time)
merging_duration[task_measurement.merging_time] = (
task_measurement.merging_duration)
pending_merge_duration[task_measurement.processing_time] = (
task_measurement.merging_time - task_measurement.pending_merge_time)
processing_duration[task_measurement.processing_time] = (
task_measurement.processing_duration)
queued_duration[task_measurement.scheduled_time] = (
task_measurement.processing_time - task_measurement.scheduled_time)
if measurements:  # only plot when at least one task was parsed
keys = sorted(before_pending_merge_duration.keys())
values = [before_pending_merge_duration[key] for key in keys]
pyplot.plot(keys, values, label='Before pending merge')
keys = sorted(before_queued_duration.keys())
values = [before_queued_duration[key] for key in keys]
pyplot.plot(keys, values, label='Before queued')
keys = sorted(merging_duration.keys())
values = [merging_duration[key] for key in keys]
pyplot.plot(keys, values, label='Merging')
keys = sorted(pending_merge_duration.keys())
values = [pending_merge_duration[key] for key in keys]
pyplot.plot(keys, values, label='Pending merge')
keys = sorted(processing_duration.keys())
values = [processing_duration[key] for key in keys]
pyplot.plot(keys, values, label='Processing')
keys = sorted(queued_duration.keys())
values = [queued_duration[key] for key in keys]
pyplot.plot(keys, values, label='Queued')
pyplot.title('Task status duration')
pyplot.xlabel('Time')
pyplot.xscale('linear')
pyplot.ylabel('Duration')
pyplot.yscale('linear')
pyplot.legend()
if options.output_file:
pyplot.savefig(options.output_file)
else:
pyplot.show()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
|
#!/usr/bin/env python
'''
Model code for managing rectangular and hexagonal maps
======================================================
This module provides classes for managing rectangular and hexagonal maps.
---------------
Getting Started
---------------
You may create a map interactively and query it:
TBD
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import math
import xml.dom
import xml.dom.minidom
from resource import Resource, register_factory
from scene2d.drawable import *
@register_factory('rectmap')
def rectmap_factory(resource, tag):
width, height = map(int, tag.getAttribute('tile_size').split('x'))
origin = None
if tag.hasAttribute('origin'):
origin = map(int, tag.getAttribute('origin').split(','))
id = tag.getAttribute('id')
# now load the columns
cells = []
for i, column in enumerate(tag.getElementsByTagName('column')):
c = []
cells.append(c)
for j, cell in enumerate(column.getElementsByTagName('cell')):
tile = cell.getAttribute('tile')
if tile: tile = resource.get_resource(tile)
else: tile = None
properties = resource.handle_properties(cell)
c.append(RectCell(i, j, width, height, properties, tile))
m = RectMap(id, width, height, cells, origin)
resource.add_resource(id, m)
return m
@register_factory('hexmap')
def hexmap_factory(resource, tag):
height = int(tag.getAttribute('tile_height'))
width = hex_width(height)
origin = None
if tag.hasAttribute('origin'):
origin = map(int, tag.getAttribute('origin').split(','))
id = tag.getAttribute('id')
# now load the columns
cells = []
for i, column in enumerate(tag.getElementsByTagName('column')):
c = []
cells.append(c)
for j, cell in enumerate(column.getElementsByTagName('cell')):
tile = cell.getAttribute('tile')
if tile: tile = resource.get_resource(tile)
else: tile = None
properties = resource.handle_properties(cell)
c.append(HexCell(i, j, height, properties, tile))
m = HexMap(id, height, cells, origin)
resource.add_resource(id, m)
return m
def hex_width(height):
'''Determine a regular hexagon's width given its height.
'''
return int(height / math.sqrt(3)) * 2
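# Worked example (hypothetical value): hex_width(32) == 36, since
# int(32 / math.sqrt(3)) == 18 and 18 * 2 == 36.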
class Map(object):
'''Base class for Maps.
Both rect and hex maps have the following attributes:
id -- identifies the map in XML and Resources
(width, height) -- size of map in cells
(pxw, pxh) -- size of map in pixels
(tw, th) -- size of each cell in pixels
(x, y, z) -- offset of map top left from origin in pixels
cells -- array [x][y] of Cell instances
'''
class RegularTesselationMap(Map):
'''A class of Map that has a regular array of Cells.
'''
def get_cell(self, x, y):
''' Return Cell at cell pos=(x,y).
Return None if out of bounds.'''
if x < 0 or y < 0:
return None
try:
return self.cells[x][y]
except IndexError:
return None
class RectMap(RegularTesselationMap):
'''Rectangular map.
Cells are stored in column-major order with y increasing up,
allowing [x][y] addressing:
+---+---+---+
| d | e | f |
+---+---+---+
| a | b | c |
+---+---+---+
Thus cells = [['a', 'd'], ['b', 'e'], ['c', 'f']]
and cells[0][1] = 'd'
'''
def __init__(self, id, tw, th, cells, origin=None):
self.id = id
self.tw, self.th = tw, th
if origin is None:
origin = (0, 0, 0)
self.x, self.y, self.z = origin
self.cells = cells
self.pxw = len(cells) * tw
self.pxh = len(cells[0]) * th
def get_in_region(self, x1, y1, x2, y2):
'''Return cells (in [column][row]) that are within the
pixel bounds specified by the bottom-left (x1, y1) and top-right
(x2, y2) corners.
'''
x1 = max(0, x1 // self.tw)
y1 = max(0, y1 // self.th)
x2 = min(len(self.cells), x2 // self.tw + 1)
y2 = min(len(self.cells[0]), y2 // self.th + 1)
return [self.cells[x][y] for x in range(x1, x2) for y in range(y1, y2)]
def get(self, x, y):
''' Return Cell at pixel px=(x,y).
Return None if out of bounds.'''
return self.get_cell(x // self.tw, y // self.th)
UP = (0, 1)
DOWN = (0, -1)
LEFT = (-1, 0)
RIGHT = (1, 0)
def get_neighbor(self, cell, direction):
'''Get the neighbor Cell in the given direction (dx, dy) which
is one of self.UP, self.DOWN, self.LEFT or self.RIGHT.
Returns None if out of bounds.
'''
dx, dy = direction
return self.get_cell(cell.x + dx, cell.y + dy)
@classmethod
def load_xml(cls, filename, id):
'''Load a map from the indicated XML file.
Return a Map instance.'''
return Resource.load(filename)[id]
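# Minimal usage sketch (hypothetical values; tile-less cells are allowed
# because Cell.__init__ below only touches the tile when it is not None):
#   cells = [[RectCell(i, j, 16, 16, {}, None) for j in range(2)]
#            for i in range(3)]
#   m = RectMap('demo', 16, 16, cells)     # 48x32 pixel map
#   m.get_cell(0, 1)                       # cells[0][1]
#   m.get(24, 8)                           # pixel (24, 8) -> cells[1][0]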
class Cell(Drawable):
'''Base class for cells from rect and hex maps.
Common attributes:
x, y -- cell coordinates within the map grid
width, height -- dimensions of the cell in pixels
properties -- arbitrary properties
tile -- tile drawn in this cell (may be None)
'''
def __init__(self, x, y, width, height, properties, tile):
super(Cell, self).__init__()
self.width, self.height = width, height
self.x, self.y = x, y
self.properties = properties
self.tile = tile
if tile is not None:
# pre-calculate the style to force creation of _style
self.get_style()
def __repr__(self):
return '<%s object at 0x%x (%g, %g) properties=%r tile=%r>'%(
self.__class__.__name__, id(self), self.x, self.y,
self.properties, self.tile)
def get_style(self):
if self.tile is None: return None
return super(Cell, self).get_style()
def get_drawstyle(self):
'''Get the possibly-affected style from the tile. Adjust for this
cell's position.
'''
style = self.tile.get_style().copy()
x, y = self.get_origin()
style.x, style.y = style.x + x, style.y + y
return style
class RectCell(Cell):
'''A rectangular cell from a Map.
Read-only attributes:
top -- y extent
bottom -- y extent
left -- x extent
right -- x extent
origin -- (x, y) of bottom-left corner
center -- (x, y)
topleft -- (x, y) of top-left corner
topright -- (x, y) of top-right corner
bottomleft -- (x, y) of bottom-left corner
bottomright -- (x, y) of bottom-right corner
midtop -- (x, y) of middle of top side
midbottom -- (x, y) of middle of bottom side
midleft -- (x, y) of middle of left side
midright -- (x, y) of middle of right side
'''
def get_origin(self):
return self.x * self.width, self.y * self.height
origin = property(get_origin)
# ro, side in pixels, y extent
def get_top(self):
return (self.y + 1) * self.height
top = property(get_top)
# ro, side in pixels, y extent
def get_bottom(self):
return self.y * self.height
bottom = property(get_bottom)
# ro, in pixels, (x, y)
def get_center(self):
return (self.x * self.width + self.width // 2,
self.y * self.height + self.height // 2)
center = property(get_center)
# ro, mid-point in pixels, (x, y)
def get_midtop(self):
return (self.x * self.width + self.width // 2,
(self.y + 1) * self.height)
midtop = property(get_midtop)
# ro, mid-point in pixels, (x, y)
def get_midbottom(self):
return (self.x * self.width + self.width // 2, self.y * self.height)
midbottom = property(get_midbottom)
# ro, side in pixels, x extent
def get_left(self):
return self.x * self.width
left = property(get_left)
# ro, side in pixels, x extent
def get_right(self):
return (self.x + 1) * self.width
right = property(get_right)
# ro, corner in pixels, (x, y)
def get_topleft(self):
return (self.x * self.width, (self.y + 1) * self.height)
topleft = property(get_topleft)
# ro, corner in pixels, (x, y)
def get_topright(self):
return ((self.x + 1) * self.width, (self.y + 1) * self.height)
topright = property(get_topright)
# ro, corner in pixels, (x, y)
def get_bottomleft(self):
return (self.x * self.width, self.y * self.height)
bottomleft = property(get_bottomleft)
origin = property(get_bottomleft)
# ro, corner in pixels, (x, y)
def get_bottomright(self):
return ((self.x + 1) * self.width, self.y * self.height)
bottomright = property(get_bottomright)
# ro, mid-point in pixels, (x, y)
def get_midleft(self):
return (self.x * self.width, self.y * self.height + self.height // 2)
midleft = property(get_midleft)
# ro, mid-point in pixels, (x, y)
def get_midright(self):
return ((self.x + 1) * self.width,
self.y * self.height + self.height // 2)
midright = property(get_midright)
class HexMap(RegularTesselationMap):
'''Map with flat-top, regular hexagonal cells.
Additional attributes extending Map:
edge_length -- length of an edge in pixels
Hexmaps store their cells in an offset array, column-major with y
increasing up, such that a map:
/d\ /h\
/b\_/f\_/
\_/c\_/g\
/a\_/e\_/
\_/ \_/
has cells = [['a', 'b'], ['c', 'd'], ['e', 'f'], ['g', 'h']]
'''
def __init__(self, id, th, cells, origin=None):
self.id = id
self.th = th
if origin is None:
origin = (0, 0, 0)
self.x, self.y, self.z = origin
self.cells = cells
# figure some convenience values
s = self.edge_length = int(th / math.sqrt(3))
self.tw = self.edge_length * 2
# now figure map dimensions
width = len(cells)
height = len(cells[0])
self.pxw = self.tw + (width - 1) * (s + s // 2)
self.pxh = height * self.th
if not width % 2:
self.pxh += (th // 2)
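# Worked example (hypothetical values): th=32 gives edge_length s=18 and
# tw=36; a 4-column, 2-row map is pxw = 36 + 3 * (18 + 9) = 117 pixels wide
# and pxh = 2 * 32 + 16 = 80 pixels high (the extra half cell because the
# column count is even).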
def get_in_region(self, x1, y1, x2, y2):
'''Return cells (in [column][row]) that are within the pixel bounds
specified by the bottom-left (x1, y1) and top-right (x2, y2) corners.
'''
col_width = self.tw // 2 + self.tw // 4
x1 = max(0, x1 // col_width)
y1 = max(0, y1 // self.th - 1)
x2 = min(len(self.cells), x2 // col_width + 1)
y2 = min(len(self.cells[0]), y2 // self.th + 1)
return [self.cells[x][y] for x in range(x1, x2) for y in range(y1, y2)]
def get(self, x, y):
'''Get the Cell at pixel px=(x,y).
Return None if out of bounds.'''
s = self.edge_length
# map is divided into columns of
# s/2 (shared), s, s/2(shared), s, s/2 (shared), ...
x = x // (s/2 + s)
if x % 2:
# every second cell is up one
y -= self.th // 2
y = y // self.th
return self.get_cell(x, y)
UP = 'up'
DOWN = 'down'
UP_LEFT = 'up left'
UP_RIGHT = 'up right'
DOWN_LEFT = 'down left'
DOWN_RIGHT = 'down right'
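# Parity note for get_neighbor() below: odd columns sit half a cell higher
# (see HexCell.get_origin), so from an even column UP_RIGHT is (x + 1, y)
# while from an odd column it is (x + 1, y + 1); the DOWN_* directions
# shift down analogously.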
def get_neighbor(self, cell, direction):
'''Get the neighbor HexCell in the given direction which
is one of self.UP, self.DOWN, self.UP_LEFT, self.UP_RIGHT,
self.DOWN_LEFT or self.DOWN_RIGHT.
Return None if out of bounds.
'''
if direction is self.UP:
return self.get_cell(cell.x, cell.y + 1)
elif direction is self.DOWN:
return self.get_cell(cell.x, cell.y - 1)
elif direction is self.UP_LEFT:
if cell.x % 2:
return self.get_cell(cell.x - 1, cell.y + 1)
else:
return self.get_cell(cell.x - 1, cell.y)
elif direction is self.UP_RIGHT:
if cell.x % 2:
return self.get_cell(cell.x + 1, cell.y + 1)
else:
return self.get_cell(cell.x + 1, cell.y)
elif direction is self.DOWN_LEFT:
if cell.x % 2:
return self.get_cell(cell.x - 1, cell.y)
else:
return self.get_cell(cell.x - 1, cell.y - 1)
elif direction is self.DOWN_RIGHT:
if cell.x % 2:
return self.get_cell(cell.x + 1, cell.y)
else:
return self.get_cell(cell.x + 1, cell.y - 1)
else:
raise ValueError('Unknown direction %r' % direction)
# Note that we always add below (not subtract) so that we can try to
# avoid accumulation errors due to rounding ints. We do this so that
# each point lands at the same position as the corresponding point of a
# neighboring cell.
class HexCell(Cell):
'''A flat-top, regular hexagon cell from a HexMap.
Read-only attributes:
top -- y extent
bottom -- y extent
left -- (x, y) of left corner
right -- (x, y) of right corner
center -- (x, y)
origin -- (x, y) of bottom-left corner of bounding rect
topleft -- (x, y) of top-left corner
topright -- (x, y) of top-right corner
bottomleft -- (x, y) of bottom-left corner
bottomright -- (x, y) of bottom-right corner
midtop -- (x, y) of middle of top side
midbottom -- (x, y) of middle of bottom side
midtopleft -- (x, y) of middle of left side
midtopright -- (x, y) of middle of right side
midbottomleft -- (x, y) of middle of left side
midbottomright -- (x, y) of middle of right side
'''
def __init__(self, x, y, height, properties, tile):
width = hex_width(height)
Cell.__init__(self, x, y, width, height, properties, tile)
def get_origin(self):
x = self.x * (self.width // 2 + self.width // 4)
y = self.y * self.height
if self.x % 2:
y += self.height // 2
return (x, y)
origin = property(get_origin)
# ro, side in pixels, y extent
def get_top(self):
y = self.get_origin()[1]
return y + self.height
top = property(get_top)
# ro, side in pixels, y extent
def get_bottom(self):
return self.get_origin()[1]
bottom = property(get_bottom)
# ro, in pixels, (x, y)
def get_center(self):
x, y = self.get_origin()
return (x + self.width // 2, y + self.height // 2)
center = property(get_center)
# ro, mid-point in pixels, (x, y)
def get_midtop(self):
x, y = self.get_origin()
return (x + self.width // 2, y + self.height)
midtop = property(get_midtop)
# ro, mid-point in pixels, (x, y)
def get_midbottom(self):
x, y = self.get_origin()
return (x + self.width // 2, y)
midbottom = property(get_midbottom)
# ro, side in pixels, x extent
def get_left(self):
x, y = self.get_origin()
return (x, y + self.height // 2)
left = property(get_left)
# ro, side in pixels, x extent
def get_right(self):
x, y = self.get_origin()
return (x + self.width, y + self.height // 2)
right = property(get_right)
# ro, corner in pixels, (x, y)
def get_topleft(self):
x, y = self.get_origin()
return (x + self.width // 4, y + self.height)
topleft = property(get_topleft)
# ro, corner in pixels, (x, y)
def get_topright(self):
x, y = self.get_origin()
return (x + self.width // 2 + self.width // 4, y + self.height)
topright = property(get_topright)
# ro, corner in pixels, (x, y)
def get_bottomleft(self):
x, y = self.get_origin()
return (x + self.width // 4, y)
bottomleft = property(get_bottomleft)
# ro, corner in pixels, (x, y)
def get_bottomright(self):
x, y = self.get_origin()
return (x + self.width // 2 + self.width // 4, y)
bottomright = property(get_bottomright)
# ro, middle of side in pixels, (x, y)
def get_midtopleft(self):
x, y = self.get_origin()
return (x + self.width // 8, y + self.height // 2 + self.height // 4)
midtopleft = property(get_midtopleft)
# ro, middle of side in pixels, (x, y)
def get_midtopright(self):
x, y = self.get_origin()
return (x + self.width // 2 + self.width // 4 + self.width // 8,
y + self.height // 2 + self.height // 4)
midtopright = property(get_midtopright)
# ro, middle of side in pixels, (x, y)
def get_midbottomleft(self):
x, y = self.get_origin()
return (x + self.width // 8, y + self.height // 4)
midbottomleft = property(get_midbottomleft)
# ro, middle of side in pixels, (x, y)
def get_midbottomright(self):
x, y = self.get_origin()
return (x + self.width // 2 + self.width // 4 + self.width // 8,
y + self.height // 4)
midbottomright = property(get_midbottomright)
|
|
# -*- coding: utf-8 -*-
from bson import ObjectId
import eve
import json
from eve import Eve
from eve.auth import BasicAuth, TokenAuth, HMACAuth
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
class ValidBasicAuth(BasicAuth):
def __init__(self):
self.request_auth_value = 'admin'
super(ValidBasicAuth, self).__init__()
def check_auth(self, username, password, allowed_roles, resource, method):
self.set_request_auth_value(self.request_auth_value)
return username == 'admin' and password == 'secret' and \
(allowed_roles == ['admin'] if allowed_roles else True)
class BadBasicAuth(BasicAuth):
pass
class ValidTokenAuth(TokenAuth):
def check_auth(self, token, allowed_roles, resource, method):
return token == 'test_token' and (allowed_roles == ['admin'] if
allowed_roles else True)
class ValidHMACAuth(HMACAuth):
def check_auth(self, userid, hmac_hash, headers, data, allowed_roles,
resource, method):
return userid == 'admin' and hmac_hash == 'secret' and \
(allowed_roles == ['admin'] if allowed_roles else True)
class BadHMACAuth(HMACAuth):
pass
class TestBasicAuth(TestBase):
def setUp(self):
super(TestBasicAuth, self).setUp()
self.app = Eve(settings=self.settings_file, auth=ValidBasicAuth)
self.test_client = self.app.test_client()
self.content_type = ('Content-Type', 'application/json')
self.valid_auth = [('Authorization', 'Basic YWRtaW46c2VjcmV0'),
self.content_type]
self.invalid_auth = [('Authorization', 'Basic IDontThinkSo'),
self.content_type]
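# 'YWRtaW46c2VjcmV0' is base64 for 'admin:secret', the credentials
# accepted by ValidBasicAuth; the invalid header carries no decodable
# user:password pair.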
for _, schema in self.app.config['DOMAIN'].items():
schema['allowed_roles'] = ['admin']
schema['allowed_item_roles'] = ['admin']
self.app.set_defaults()
def test_custom_auth(self):
self.assertTrue(isinstance(self.app.auth, ValidBasicAuth))
def test_restricted_home_access(self):
r = self.test_client.get('/')
self.assert401(r.status_code)
def test_restricted_resource_access(self):
r = self.test_client.get(self.known_resource_url)
self.assert401(r.status_code)
r = self.test_client.post(self.known_resource_url)
self.assert401(r.status_code)
r = self.test_client.delete(self.known_resource_url)
self.assert401(r.status_code)
def test_restricted_item_access(self):
r = self.test_client.get(self.item_id_url)
self.assert401(r.status_code)
r = self.test_client.patch(self.item_id_url)
self.assert401(r.status_code)
r = self.test_client.delete(self.item_id_url)
self.assert401(r.status_code)
def test_authorized_home_access(self):
r = self.test_client.get('/', headers=self.valid_auth)
self.assert200(r.status_code)
def test_authorized_resource_access(self):
r = self.test_client.get(self.known_resource_url,
headers=self.valid_auth)
self.assert200(r.status_code)
r = self.test_client.post(self.known_resource_url,
data=json.dumps({"k": "value"}),
headers=self.valid_auth)
self.assert400(r.status_code)
r = self.test_client.delete(self.known_resource_url,
headers=self.valid_auth)
self.assert200(r.status_code)
def test_authorized_item_access(self):
r = self.test_client.get(self.item_id_url, headers=self.valid_auth)
self.assert200(r.status_code)
r = self.test_client.patch(self.item_id_url,
data=json.dumps({"k": "value"}),
headers=self.valid_auth)
self.assert403(r.status_code)
r = self.test_client.delete(self.item_id_url, headers=self.valid_auth)
self.assert403(r.status_code)
def test_unauthorized_home_access(self):
r = self.test_client.get('/', headers=self.invalid_auth)
self.assert401(r.status_code)
def test_unauthorized_resource_access(self):
r = self.test_client.get(self.known_resource_url,
headers=self.invalid_auth)
self.assert401(r.status_code)
r = self.test_client.post(self.known_resource_url,
headers=self.invalid_auth)
self.assert401(r.status_code)
r = self.test_client.delete(self.known_resource_url,
headers=self.invalid_auth)
self.assert401(r.status_code)
def test_unauthorized_item_access(self):
r = self.test_client.get(self.item_id_url, headers=self.invalid_auth)
self.assert401(r.status_code)
r = self.test_client.patch(self.item_id_url, headers=self.invalid_auth)
self.assert401(r.status_code)
r = self.test_client.delete(self.item_id_url,
headers=self.invalid_auth)
self.assert401(r.status_code)
def test_home_public_methods(self):
self.app.config['PUBLIC_METHODS'] = ['GET']
r = self.test_client.get('/')
self.assert200(r.status_code)
self.test_restricted_resource_access()
self.test_restricted_item_access()
def test_public_methods_resource(self):
self.app.config['PUBLIC_METHODS'] = ['GET']
domain = self.app.config['DOMAIN']
for resource, settings in domain.items():
del(settings['public_methods'])
self.app.set_defaults()
del(domain['peopleinvoices'])
for resource in domain:
url = self.app.config['URLS'][resource]
r = self.test_client.get(url)
self.assert200(r.status_code)
r = self.test_client.post(url, data={'key1': 'value1'})
self.assert401or405(r.status_code)
r = self.test_client.delete(url)
self.assert401or405(r.status_code)
self.test_restricted_item_access()
def test_public_methods_but_locked_resource(self):
self.app.config['PUBLIC_METHODS'] = ['GET']
domain = self.app.config['DOMAIN']
for _, settings in domain.items():
del(settings['public_methods'])
self.app.set_defaults()
domain[self.known_resource]['public_methods'] = []
r = self.test_client.get(self.known_resource_url)
self.assert401(r.status_code)
def test_public_methods_but_locked_item(self):
self.app.config['PUBLIC_ITEM_METHODS'] = ['GET']
domain = self.app.config['DOMAIN']
for _, settings in domain.items():
del(settings['public_item_methods'])
self.app.set_defaults()
domain[self.known_resource]['public_item_methods'] = []
r = self.test_client.get(self.item_id_url)
self.assert401(r.status_code)
def test_public_methods_item(self):
self.app.config['PUBLIC_ITEM_METHODS'] = ['GET']
for _, settings in self.app.config['DOMAIN'].items():
del(settings['public_item_methods'])
self.app.set_defaults()
# we're happy with testing just one client endpoint, but for the sake of
# completeness we should probably test item endpoints for every resource
r = self.test_client.get(self.item_id_url)
self.assert200(r.status_code)
r = self.test_client.patch(self.item_id_url)
self.assert401(r.status_code)
r = self.test_client.delete(self.item_id_url)
self.assert401(r.status_code)
def test_bad_auth_class(self):
self.app = Eve(settings=self.settings_file, auth=BadBasicAuth)
self.test_client = self.app.test_client()
r = self.test_client.get('/', headers=self.valid_auth)
# will fail because check_auth() is not implemented in the custom class
self.assert500(r.status_code)
def test_instanced_auth(self):
# tests that the 'auth' argument can also be a class instance. See
# #248.
# current self.app instance has an instanced auth class already, and it
# is consistent with the super class running the test (Token, HMAC or
# Basic), so we are just going to use it (self.app.auth) on a new Eve
# instance.
auth = self.app.auth
self.app = Eve(settings=self.settings_file, auth=auth)
self.test_client = self.app.test_client()
r = self.test_client.get('/', headers=self.valid_auth)
self.assert200(r.status_code)
def test_rfc2617_response(self):
r = self.test_client.get('/')
self.assert401(r.status_code)
self.assertTrue(('WWW-Authenticate', 'Basic realm:"%s"' %
eve.__package__) in r.headers.to_wsgi_list())
class TestTokenAuth(TestBasicAuth):
def setUp(self):
super(TestTokenAuth, self).setUp()
self.app = Eve(settings=self.settings_file, auth=ValidTokenAuth)
self.test_client = self.app.test_client()
self.valid_auth = [('Authorization', 'Basic dGVzdF90b2tlbjo='),
self.content_type]
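# 'dGVzdF90b2tlbjo=' is base64 for 'test_token:', i.e. the token is sent
# as the username of a Basic credential with an empty password, which is
# what ValidTokenAuth.check_auth expects.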
def test_custom_auth(self):
self.assertTrue(isinstance(self.app.auth, ValidTokenAuth))
class TestHMACAuth(TestBasicAuth):
def setUp(self):
super(TestHMACAuth, self).setUp()
self.app = Eve(settings=self.settings_file, auth=ValidHMACAuth)
self.test_client = self.app.test_client()
self.valid_auth = [('Authorization', 'admin:secret'),
self.content_type]
def test_custom_auth(self):
self.assertTrue(isinstance(self.app.auth, ValidHMACAuth))
def test_bad_auth_class(self):
self.app = Eve(settings=self.settings_file, auth=BadHMACAuth)
self.test_client = self.app.test_client()
r = self.test_client.get('/', headers=self.valid_auth)
# will fail because check_auth() is not implemented in the custom class
self.assert500(r.status_code)
def test_rfc2617_response(self):
r = self.test_client.get('/')
self.assert401(r.status_code)
class TestResourceAuth(TestBase):
def test_resource_only_auth(self):
# no auth at the API level
self.app = Eve(settings=self.settings_file)
self.test_client = self.app.test_client()
# explicit auth for just one resource
self.app.config['DOMAIN']['contacts']['authentication'] = \
ValidBasicAuth()
self.app.config['DOMAIN']['empty']['authentication'] = ValidTokenAuth()
self.app.set_defaults()
basic_auth = [('Authorization', 'Basic YWRtaW46c2VjcmV0')]
token_auth = [('Authorization', 'Basic dGVzdF90b2tlbjo=')]
# 'contacts' endpoints are protected
r = self.test_client.get(self.known_resource_url)
self.assert401(r.status_code)
r = self.test_client.get(self.item_id_url)
self.assert401(r.status_code)
# both with BasicAuth.
_, status = self.parse_response(
self.test_client.get(self.known_resource_url, headers=basic_auth))
self.assert200(status)
_, status = self.parse_response(
self.test_client.get(self.item_id_url, headers=basic_auth))
self.assert200(status)
# 'empty' resource endpoint is also protected
r = self.test_client.get(self.empty_resource_url)
self.assert401(r.status_code)
# but with TokenAuth
r = self.test_client.get(self.empty_resource_url, headers=token_auth)
self.assert200(r.status_code)
# other resources are not protected
r = self.test_client.get(self.readonly_resource_url)
self.assert200(r.status_code)
class TestUserRestrictedAccess(TestBase):
def setUp(self):
super(TestUserRestrictedAccess, self).setUp()
self.app = Eve(settings=self.settings_file, auth=ValidBasicAuth)
# using this endpoint since it is a copy of 'contacts' with
# no filter on the datasource
self.url = 'restricted'
self.resource = self.app.config['DOMAIN'][self.url]
self.test_client = self.app.test_client()
self.valid_auth = [('Authorization', 'Basic YWRtaW46c2VjcmV0')]
self.invalid_auth = [('Authorization', 'Basic IDontThinkSo')]
self.field_name = 'auth_field'
self.data = json.dumps({"ref": "0123456789123456789012345"})
for _, settings in self.app.config['DOMAIN'].items():
settings[self.field_name] = 'username'
self.resource['public_methods'] = []
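# With auth_field set to 'username', documents on these endpoints are
# implicitly filtered by the value passed to set_request_auth_value() in
# ValidBasicAuth ('admin' here), so each user only sees and edits their
# own documents.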
def test_get(self):
data, status = self.parse_response(
self.test_client.get(self.url, headers=self.valid_auth))
self.assert200(status)
# no data has been saved by user 'admin' yet,
# so assert we get an empty result set back.
self.assertEqual(len(data['_items']), 0)
# Add a user belonging to `admin`
new_user = self.random_contacts(1)[0]
new_user['username'] = 'admin'
_db = self.connection[self.app.config['MONGO_DBNAME']]
_db.contacts.insert(new_user)
# Verify that we can retrieve it
data2, status2 = self.parse_response(
self.test_client.get(self.url,
headers=self.valid_auth))
self.assert200(status2)
self.assertEqual(len(data2['_items']), 1)
def test_get_by_auth_field_criteria(self):
""" If we attempt to retrieve an object by the same field
that is in `auth_field`, then the request is /unauthorized/,
and should fail and return 401.
This test verifies that the `auth_field` does not overwrite
a `client_filter` or url param.
"""
_, status = self.parse_response(
self.test_client.get(self.user_username_url,
headers=self.valid_auth))
self.assert401(status)
def test_get_by_auth_field_id(self):
""" To test handling of ObjectIds
"""
# set auth_field to `_id`
self.app.config['DOMAIN']['users'][self.field_name] = \
self.app.config['ID_FIELD']
_, status = self.parse_response(
self.test_client.get(self.user_id_url,
headers=self.valid_auth))
self.assert401(status)
def test_filter_by_auth_field_id(self):
""" To test handling of ObjectIds when using a `where` clause
We need to make sure we *match* an object ID when it is the
same
"""
_id = ObjectId('deadbeefdeadbeefdeadbeef')
resource_def = self.app.config['DOMAIN']['users']
resource_def['authentication'].request_auth_value = _id
# set auth_field to `_id`
resource_def[self.field_name] = '_id'
# Retrieving a /different user/ by id returns 401
user_url = '/users/'
filter_by_id = 'where=_id==ObjectId("%s")'
filter_query = filter_by_id % self.user_id
_, status = self.parse_response(
self.test_client.get('%s?%s' % (user_url, filter_query),
headers=self.valid_auth))
self.assert401(status)
# Create a user account belonging to admin
new_user = self.random_contacts(1)[0]
new_user['_id'] = _id
new_user['username'] = 'admin'
_db = self.connection[self.app.config['MONGO_DBNAME']]
_db.contacts.insert(new_user)
# Retrieving /the same/ user by id returns OK
filter_query_2 = filter_by_id % 'deadbeefdeadbeefdeadbeef'
data2, status2 = self.parse_response(
self.test_client.get('%s?%s' % (user_url, filter_query_2),
headers=self.valid_auth))
self.assert200(status2)
self.assertEqual(len(data2['_items']), 1)
def test_collection_get_public(self):
""" Test that if GET is in `public_methods` the `auth_field`
criteria is overruled
"""
self.resource['public_methods'].append('GET')
data, status = self.parse_response(
self.test_client.get(self.url)) # no auth
self.assert200(status)
# no data has been saved by user 'admin' yet,
# but we should get all the other results back
self.assertEqual(len(data['_items']), 25)
def test_item_get_public(self):
""" Test that if GET is in `public_item_methods` the `auth_field`
criteria is overruled
"""
self.resource['public_item_methods'].append('GET')
data, status = self.parse_response(
self.test_client.get(self.item_id_url,
headers=self.valid_auth))
self.assert200(status)
self.assertEqual(data['_id'], self.item_id)
def test_post(self):
_, status = self.post()
self.assert201(status)
data, status = self.parse_response(
self.test_client.get(self.url,
headers=self.valid_auth))
self.assert200(status)
# len of 1 as there is only 1 doc saved by this user
self.assertEqual(len(data['_items']), 1)
def test_post_resource_auth(self):
# Ticket #231.
# Test that user restricted access works fine if there's no global
# level auth, which is set at resource level instead.
# no global auth.
self.app = Eve(settings=self.settings_file)
# set auth at resource level instead.
resource_def = self.app.config['DOMAIN'][self.url]
resource_def['authentication'] = ValidBasicAuth()
resource_def['auth_field'] = 'username'
# post with valid auth - must store the document with the correct
# auth_field.
r = self.app.test_client().post(self.url, data=self.data,
headers=self.valid_auth,
content_type='application/json')
_, status = self.parse_response(r)
# Verify that we can retrieve the same document
data, status = self.parse_response(
self.app.test_client().get(self.url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(len(data['_items']), 1)
self.assertEqual(data['_items'][0]['ref'],
json.loads(self.data)['ref'])
def test_put(self):
new_ref = "9999999999999999999999999"
changes = json.dumps({"ref": new_ref})
# post document
data, status = self.post()
# retrieve document metadata
url = '%s/%s' % (self.url, data['_id'])
response = self.test_client.get(url, headers=self.valid_auth)
etag = response.headers['ETag']
# perform put
headers = [('If-Match', etag), self.valid_auth[0]]
response, status = self.parse_response(
self.test_client.put(url, data=json.dumps(changes),
headers=headers,
content_type='application/json'))
self.assert200(status)
# document still accessible with same auth
data, status = self.parse_response(
self.test_client.get(url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(data['ref'], new_ref)
def test_put_resource_auth(self):
# no global auth.
self.app = Eve(settings=self.settings_file)
# set auth at resource level instead.
resource_def = self.app.config['DOMAIN'][self.url]
resource_def['authentication'] = ValidBasicAuth()
resource_def['auth_field'] = 'username'
# post
r = self.app.test_client().post(self.url, data=self.data,
headers=self.valid_auth,
content_type='application/json')
data, status = self.parse_response(r)
# retrieve document metadata
url = '%s/%s' % (self.url, data['_id'])
response = self.app.test_client().get(url, headers=self.valid_auth)
etag = response.headers['ETag']
new_ref = "9999999999999999999999999"
changes = json.dumps({"ref": new_ref})
# put
headers = [('If-Match', etag), self.valid_auth[0]]
response, status = self.parse_response(
self.app.test_client().put(url, data=json.dumps(changes),
headers=headers,
content_type='application/json'))
self.assert200(status)
# document still accessible with same auth
data, status = self.parse_response(
self.app.test_client().get(url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(data['ref'], new_ref)
def test_patch(self):
new_ref = "9999999999999999999999999"
changes = json.dumps({"ref": new_ref})
data, status = self.post()
url = '%s/%s' % (self.url, data['_id'])
response = self.test_client.get(url, headers=self.valid_auth)
etag = response.headers['ETag']
headers = [('If-Match', etag), self.valid_auth[0]]
response, status = self.parse_response(
self.test_client.patch(url, data=json.dumps(changes),
headers=headers,
content_type='application/json'))
self.assert200(status)
data, status = self.parse_response(
self.test_client.get(url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(data['ref'], new_ref)
def test_delete(self):
_db = self.connection[MONGO_DBNAME]
# make sure that other documents in the collections are untouched.
cursor = _db.contacts.find()
docs_num = cursor.count()
_, _ = self.post()
# after the post we only get back 1 document as it's the only one we
# inserted directly (others are filtered out).
response, status = self.parse_response(
self.test_client.get(self.url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(len(response[self.app.config['ITEMS']]), 1)
# delete the document we just inserted
response, status = self.parse_response(
self.test_client.delete(self.url, headers=self.valid_auth))
self.assert200(status)
# we now get an empty items list (other documents in collection are
# filtered by auth).
response, status = self.parse_response(
self.test_client.get(self.url, headers=self.valid_auth))
self.assert200(status)
self.assertEqual(len(response[self.app.config['ITEMS']]), 0)
# make sure no other document has been deleted.
cursor = _db.contacts.find()
self.assertEqual(cursor.count(), docs_num)
def test_delete_item(self):
_db = self.connection[MONGO_DBNAME]
# make sure that other documents in the collections are untouched.
cursor = _db.contacts.find()
docs_num = cursor.count()
data, _ = self.post()
# get back the document with its new etag
url = '%s/%s' % (self.url, data['_id'])
response = self.test_client.get(url, headers=self.valid_auth)
etag = response.headers['ETag']
headers = [('If-Match', etag),
('Authorization', 'Basic YWRtaW46c2VjcmV0')]
# delete the document
response, status = self.parse_response(
self.test_client.delete(url, headers=headers))
self.assert200(status)
# make sure no other document has been deleted.
cursor = _db.contacts.find()
self.assertEqual(cursor.count(), docs_num)
def post(self):
r = self.test_client.post(self.url,
data=self.data,
headers=self.valid_auth,
content_type='application/json')
return self.parse_response(r)
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for performing authenticated RPCs against App Engine."""
import google
import cookielib
import cStringIO
import fancy_urllib
import gzip
import hashlib
import logging
import os
import re
import socket
import sys
import time
import urllib
import urllib2
_UPLOADING_APP_DOC_URLS = {
"go": "https://developers.google.com/appengine/docs/go/tools/"
"uploadinganapp#Go_Password-less_login_with_OAuth2",
"php": "https://developers.google.com/appengine/docs/php/tools/"
"uploadinganapp#PHP_Password-less_login_with_OAuth2",
"php55": "https://developers.google.com/appengine/docs/php/tools/"
"uploadinganapp#PHP_Password-less_login_with_OAuth2",
"python": "https://developers.google.com/appengine/docs/python/tools/"
"uploadinganapp#Python_Password-less_login_with_OAuth2",
"python27": "https://developers.google.com/appengine/docs/python/tools/"
"uploadinganapp#Python_Password-less_login_with_OAuth2",
"java": "https://developers.google.com/appengine/docs/java/tools/"
"uploadinganapp#Passwordless_Login_with_OAuth2",
"java7": "https://developers.google.com/appengine/docs/java/tools/"
"uploadinganapp#Passwordless_Login_with_OAuth2",
}
logger = logging.getLogger('google.appengine.tools.appengine_rpc')
def GetPlatformToken(os_module=os, sys_module=sys, platform=sys.platform):
"""Returns a 'User-agent' token for the host system platform.
Args:
os_module, sys_module, platform: Used for testing.
Returns:
String containing the platform token for the host system.
"""
if hasattr(sys_module, "getwindowsversion"):
windows_version = sys_module.getwindowsversion()
version_info = ".".join(str(i) for i in windows_version[:4])
return platform + "/" + version_info
elif hasattr(os_module, "uname"):
uname = os_module.uname()
return "%s/%s" % (uname[0], uname[2])
else:
return "unknown"
def HttpRequestToString(req, include_data=True):
"""Converts a urllib2.Request to a string.
Args:
req: urllib2.Request
Returns:
Multi-line string representing the request.
"""
headers = ""
for header in req.header_items():
headers += "%s: %s\n" % (header[0], header[1])
template = ("%(method)s %(selector)s %(type)s/1.1\n"
"Host: %(host)s\n"
"%(headers)s")
if include_data:
template = template + "\n%(data)s"
return template % {
'method': req.get_method(),
'selector': req.get_selector(),
'type': req.get_type().upper(),
'host': req.get_host(),
'headers': headers,
'data': req.get_data(),
}
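# Illustrative sketch (not part of the original module): for a simple GET
# request such as
#
#   req = urllib2.Request("http://example.com/path?x=1")
#
# HttpRequestToString(req) renders roughly
#
#   GET /path?x=1 HTTP/1.1
#   Host: example.com
#   <headers, one per line>
#   None
#
# where the trailing "None" is the (empty) request data, included because
# include_data defaults to True.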
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self._reason = args.get("Error")
self.info = args.get("Info")
def read(self):
return '%d %s: %s' % (self.code, self.msg, self.reason)
@property
def reason(self):
return self._reason
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
SUGGEST_OAUTH2 = False
RUNTIME = "python"
def __init__(self, host, auth_function, user_agent, source,
host_override=None, extra_headers=None, save_cookies=False,
auth_tries=3, account_type=None, debug_data=True, secure=True,
ignore_certs=False, rpc_tries=3):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
user_agent: The user-agent string to send to the server. Specify None to
omit the user-agent header.
source: The source to specify in authentication requests.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request. Values
supplied here will override other default headers that are supplied.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
auth_tries: The number of times to attempt auth_function before failing.
account_type: One of GOOGLE, HOSTED_OR_GOOGLE, or None for automatic.
debug_data: Whether debugging output should include data contents.
secure: If the requests sent using Send should be sent over HTTPS.
ignore_certs: If the certificate mismatches should be ignored.
rpc_tries: The number of rpc retries upon http server error (i.e.
Response code >= 500 and < 600) before failing.
"""
if secure:
self.scheme = "https"
else:
self.scheme = "http"
self.ignore_certs = ignore_certs
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.source = source
self.authenticated = False
self.auth_tries = auth_tries
self.debug_data = debug_data
self.rpc_tries = rpc_tries
self.account_type = account_type
self.extra_headers = {}
if user_agent:
self.extra_headers["User-Agent"] = user_agent
if extra_headers:
self.extra_headers.update(extra_headers)
self.save_cookies = save_cookies
self.cookie_jar = cookielib.MozillaCookieJar()
self.opener = self._GetOpener()
if self.host_override:
logger.debug("Server: %s; Host: %s", self.host, self.host_override)
else:
logger.debug("Server: %s", self.host)
if ((self.host_override and self.host_override == "localhost") or
self.host == "localhost" or self.host.startswith("localhost:")):
self._DevAppServerAuthenticate()
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
req = fancy_urllib.FancyRequest(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if not account_type:
if (self.host.split(':')[0].endswith(".google.com")
or (self.host_override
and self.host_override.split(':')[0].endswith(".google.com"))):
account_type = "HOSTED_OR_GOOGLE"
else:
account_type = "GOOGLE"
data = {
"Email": email,
"Passwd": password,
"service": "ah",
"source": self.source,
"accountType": account_type
}
req = self._CreateRequest(
url=("https://%s/accounts/ClientLogin" %
os.getenv("APPENGINE_AUTH_SERVER", "www.google.com")),
data=urllib.urlencode(data))
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1":
self.extra_headers["Cookie"] = (
'SID=%s; Path=/;' % response_dict["SID"])
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
login_path = os.environ.get("APPCFG_LOGIN_PATH", "/_ah")
req = self._CreateRequest("%s://%s%s/login?%s" %
(self.scheme, self.host, login_path,
urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for unused_i in range(self.auth_tries):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
if os.getenv("APPENGINE_RPC_USE_SID", "0") == "1":
return
except ClientLoginError, e:
if e.reason == "BadAuthentication":
if e.info == "InvalidSecondFactor":
print >>sys.stderr, ("Use an application-specific password instead "
"of your regular account password.")
print >>sys.stderr, ("See http://www.google.com/"
"support/accounts/bin/answer.py?answer=185833")
if self.SUGGEST_OAUTH2:
print >>sys.stderr, ("However, now the recommended way to log in "
"is using OAuth2. See")
print >>sys.stderr, _UPLOADING_APP_DOC_URLS[self.RUNTIME]
else:
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
@staticmethod
def _CreateDevAppServerCookieData(email, admin):
"""Creates cookie payload data.
Args:
email: The user's email address.
admin: True if the user is an admin; False otherwise.
Returns:
String containing the cookie payload.
"""
if email:
user_id_digest = hashlib.md5(email.lower()).digest()
user_id = "1" + "".join(["%02d" % ord(x) for x in user_id_digest])[:20]
else:
user_id = ""
return "%s:%s:%s" % (email, bool(admin), user_id)
def _DevAppServerAuthenticate(self):
"""Authenticates the user on the dev_appserver."""
credentials = self.auth_function()
value = self._CreateDevAppServerCookieData(credentials[0], True)
self.extra_headers["Cookie"] = ('dev_appserver_login="%s"; Path=/;' % value)
def Send(self, request_path, payload="",
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
auth_tried = False
while True:
tries += 1
url = "%s://%s%s" % (self.scheme, self.host, request_path)
if kwargs:
url += "?" + urllib.urlencode(sorted(kwargs.items()))
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
req.add_header("X-appcfg-api-version", "1")
try:
logger.debug('Sending %s request:\n%s',
self.scheme.upper(),
HttpRequestToString(req, include_data=self.debug_data))
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
logger.debug("Got http error, this is try #%s", tries)
if tries > self.rpc_tries:
raise
elif e.code == 401:
if auth_tried:
raise
auth_tried = True
self._Authenticate()
elif e.code >= 500 and e.code < 600:
continue
elif e.code == 302:
if auth_tried:
raise
auth_tried = True
loc = e.info()["location"]
logger.debug("Got 302 redirect. Location: %s", loc)
if loc.startswith("https://www.google.com/accounts/ServiceLogin"):
self._Authenticate()
elif re.match(
r"https://www\.google\.com/a/[a-z0-9\.\-]+/ServiceLogin", loc):
self.account_type = os.getenv("APPENGINE_RPC_HOSTED_LOGIN_TYPE",
"HOSTED")
self._Authenticate()
elif loc.startswith("http://%s/_ah/login" % (self.host,)):
self._DevAppServerAuthenticate()
else:
raise
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class ContentEncodingHandler(urllib2.BaseHandler):
"""Request and handle HTTP Content-Encoding."""
def http_request(self, request):
request.add_header("Accept-Encoding", "gzip")
for header in request.headers:
if header.lower() == "user-agent":
request.headers[header] += " gzip"
return request
https_request = http_request
def http_response(self, req, resp):
"""Handle encodings in the order that they are encountered."""
encodings = []
headers = resp.headers
for header in headers:
if header.lower() == "content-encoding":
for encoding in headers.get(header, "").split(","):
encoding = encoding.strip()
if encoding:
encodings.append(encoding)
break
if not encodings:
return resp
del headers[header]
fp = resp
while encodings and encodings[-1].lower() == "gzip":
fp = cStringIO.StringIO(fp.read())
fp = gzip.GzipFile(fileobj=fp, mode="r")
encodings.pop()
if encodings:
headers[header] = ", ".join(encodings)
logger.warning("Unrecognized Content-Encoding: %s", encodings[-1])
msg = resp.msg
if sys.version_info >= (2, 6):
resp = urllib2.addinfourl(fp, headers, resp.url, resp.code)
else:
response_code = resp.code
resp = urllib2.addinfourl(fp, headers, resp.url)
resp.code = response_code
resp.msg = msg
return resp
https_response = http_response
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
DEFAULT_COOKIE_FILE_PATH = "~/.appcfg_cookies"
def __init__(self, *args, **kwargs):
self.certpath = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'lib', 'cacerts',
'cacerts.txt'))
self.cert_file_available = ((not kwargs.get("ignore_certs", False))
and os.path.exists(self.certpath))
super(HttpRpcServer, self).__init__(*args, **kwargs)
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
req = super(HttpRpcServer, self)._CreateRequest(url, data)
if self.cert_file_available and fancy_urllib.can_validate_certs():
req.set_ssl_info(ca_certs=self.certpath)
return req
def _CheckCookie(self):
"""Warn if cookie is not valid for at least one minute."""
min_expire = time.time() + 60
for cookie in self.cookie_jar:
if cookie.domain == self.host and not cookie.is_expired(min_expire):
break
else:
print >>sys.stderr, "\nError: Machine system clock is incorrect.\n"
def _Authenticate(self):
"""Save the cookie jar after authentication."""
if self.cert_file_available and not fancy_urllib.can_validate_certs():
logger.warn("""ssl module not found.
Without the ssl module, the identity of the remote host cannot be verified, and
connections may NOT be secure. To fix this, please install the ssl module from
http://pypi.python.org/pypi/ssl .
To learn more, see https://developers.google.com/appengine/kb/general#rpcssl""")
super(HttpRpcServer, self)._Authenticate()
if self.cookie_jar.filename is not None and self.save_cookies:
logger.debug("Saving authentication cookies to %s",
self.cookie_jar.filename)
self.cookie_jar.save()
self._CheckCookie()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(fancy_urllib.FancyProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(fancy_urllib.FancyHTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
opener.add_handler(ContentEncodingHandler())
if self.save_cookies:
self.cookie_jar.filename = os.path.expanduser(
HttpRpcServer.DEFAULT_COOKIE_FILE_PATH)
if os.path.exists(self.cookie_jar.filename):
try:
self.cookie_jar.load()
self.authenticated = True
logger.debug("Loaded authentication cookies from %s",
self.cookie_jar.filename)
except (OSError, IOError, cookielib.LoadError), e:
logger.debug("Could not load authentication cookies; %s: %s",
e.__class__.__name__, e)
self.cookie_jar.filename = None
else:
try:
fd = os.open(self.cookie_jar.filename, os.O_CREAT, 0600)
os.close(fd)
except (OSError, IOError), e:
logger.debug("Could not create authentication cookies file; %s: %s",
e.__class__.__name__, e)
self.cookie_jar.filename = None
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
class HttpRpcServerWithOAuth2Suggestion(HttpRpcServer):
"""An HttpRpcServer variant which suggests using OAuth2 instead of ASP.
Not all systems which use HttpRpcServer can use OAuth2.
"""
SUGGEST_OAUTH2 = True
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Methods for rewriting while_v2 grad functions with IndexedSlices output."""
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.util import nest
def rewrite_grad_indexed_slices(grads, body_grad_graph, loop_vars,
forward_inputs):
"""Handles special case of IndexedSlices returned from while gradient.
Some gradient functions return IndexedSlices instead of a Tensor (e.g. the
gradient of Gather ops). When this happens in the gradient of a while body,
the resulting gradient body function will have mismatched inputs and outputs,
since the input is a single Tensor, but the IndexedSlices gets unnested into
three output Tensors.
This function fixes this by rewriting the gradient body to have three inputs
to match the three outputs, i.e., it effectively converts the input Tensor
into an input IndexedSlices. It also returns new `loop_vars` to reflect the
new inputs.
Args:
grads: the input gradient Tensors to the while gradient computation.
body_grad_graph: _WhileBodyGradFuncGraph.
loop_vars: list of Tensors. The inputs to body_grad_graph.
forward_inputs: list of Tensors. The (flat) inputs to the forward-pass While
op.
Returns:
The new loop_vars to pass to body_grad_graph.
"""
# Match up body_grad_graph.structured_outputs with the corresponding
# forward_inputs.
#
# Note that we don't expect a gradient computation to have structured output
# (e.g. no nested lists), so no need to flatten
# body_grad_graph.structured_outputs. However, structured_outputs may still
# contain composite tensors such as IndexedSlices, unlike
# body_grad_graph.outputs, which contains flattened composite tensors.
inputs_with_grads = [
t for g, t in zip(grads, forward_inputs) if g is not None
]
# Skip loop counter, maximum_iterations and total number of loop iterations.
structured_outputs = body_grad_graph.structured_outputs[3:]
for forward_input, output in zip(inputs_with_grads, structured_outputs):
if not isinstance(output, indexed_slices.IndexedSlices):
continue
if forward_input.dtype == dtypes.resource:
# TODO(skyewm): In theory we should use this for all captured inputs, not
# just resource handles (which can only be captured). We can do this by
# checking that forward_input is passed straight through to its output.
loop_vars = _rewrite_input_as_indexed_slices(body_grad_graph, output,
forward_input, loop_vars)
else:
_rewrite_output_as_tensor(body_grad_graph, output)
return loop_vars
def _get_tensor_index_in_iterable(iterable, t):
"""Returns index of first occurence of `t`, raises ValueError if not found."""
for i, elem in enumerate(iterable):
if t is elem:
return i
raise ValueError(f"Element `{t!r}` is not found in iterable `{iterable!r}`.")
def _rewrite_output_as_tensor(body_grad_graph, grad_output_slices):
"""Rewrites grad_output_slices to be a Tensor output.
Args:
body_grad_graph: _WhileBodyGradFuncGraph.
grad_output_slices: IndexedSlices output of body_grad_graph.
"""
with body_grad_graph.as_default():
new_output = ops.convert_to_tensor_v2(grad_output_slices)
idx = _get_tensor_index_in_iterable(body_grad_graph.structured_outputs,
grad_output_slices)
body_grad_graph.structured_outputs[idx] = new_output
body_grad_graph.outputs = func_graph.flatten(
body_grad_graph.structured_outputs)
def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices,
forward_input, loop_vars):
"""Rewrites grad_output_slices's corresponding input to be an IndexedSlices.
This rewrite requires that forward_input was captured in the forward loop,
i.e. is not a user-specified loop variable. This is important because the
rewrite assumes that forward_input is passed through to its corresponding
output unchanged. This assumption is used in _rewrite_input_as_indexed_slices,
which depends on the exact gradient structure produced by the input's fanout.
This can yield a more efficient computation than using
_rewrite_output_as_tensor, since it preserves the IndexedSlices structure
instead of converting the IndexedSlices to a dense Tensor.
Args:
body_grad_graph: _WhileBodyGradFuncGraph.
grad_output_slices: IndexedSlices output of body_grad_graph.
forward_input: the corresponding Tensor input to the forward loop.
loop_vars: list of Tensors. The inputs to body_grad_graph.
Returns:
The new loop_vars to pass to body_grad_graph.
"""
# Create initial IndexedSlices that will be the input to the grad While
# op. This will start as zeros, and accumulate the IndexedSlices grad output.
# Note that because forward_input is captured and not a loop var, its incoming
# gradient should always be zero.
init_slices = _create_grad_indexed_slices_init(grad_output_slices,
forward_input)
# Create a new version of grad_output_slices's gradient computation that uses
# the new IndexedSlices input instead of the original Tensor input. We'll
# return the new computation and leave the old computation as dead code.
  # TODO(skyewm): consider pruning body_grad_graph to remove the old
# computation.
with body_grad_graph.as_default():
input_slices = indexed_slices.IndexedSlices(
values=body_grad_graph.capture(init_slices.values, allowlisted=True),
indices=body_grad_graph.capture(init_slices.indices, allowlisted=True),
dense_shape=body_grad_graph.capture(
init_slices.dense_shape, allowlisted=True))
# Remove the captured tensors from the function inputs. We'll add them back
# at the correct index in _update_indexed_slices_param.
for t in _flatten(init_slices):
captured_t = body_grad_graph.captures.pop(t)
body_grad_graph.inputs.remove(captured_t)
new_output_slices = _rewrite_grad_indexed_slices_output(
grad_output_slices, input_slices)
# Update body_grad_graph's inputs and outputs to reflect the new
# IndexedSlices computation.
return _update_indexed_slices_param(body_grad_graph, loop_vars, init_slices,
input_slices, new_output_slices,
grad_output_slices)
def _create_grad_indexed_slices_init(grad_output_slices, forward_input):
"""Creates an IndexedSlices to pass as input to the while grad function.
Args:
grad_output_slices: IndexedSlices. The corresponding while grad function
output.
forward_input: Tensor. The corresponding input to the forward while op.
Returns:
Zeros IndexedSlices, created in current Graph.
"""
assert isinstance(grad_output_slices, indexed_slices.IndexedSlices)
assert isinstance(forward_input, ops.Tensor)
values_out = grad_output_slices.values
indices_out = grad_output_slices.indices
# Create the initial values tensor.
if values_out.shape.is_fully_defined():
values_shape = tensor_shape.TensorShape([0] +
values_out.shape.as_list()[1:])
values = array_ops.zeros(
values_shape, dtype=values_out.dtype, name="values_init")
else:
if forward_input.dtype == dtypes.resource:
forward_shape = gen_resource_variable_ops.variable_shape(forward_input)
else:
forward_shape = array_ops.shape(forward_input)
values_shape = array_ops.concat([[0], forward_shape[1:]], 0)
values = array_ops.zeros(
values_shape, dtype=values_out.dtype, name="values_init")
# Create the initial indices tensor.
indices = constant_op.constant([], indices_out.dtype, name="indices_init")
  # Create the initial dense_shape tensor. We assume it is the same shape as
# forward_input, since captured tensors don't change shape across loop
# iterations.
if forward_input.dtype == dtypes.resource:
shape = gen_resource_variable_ops.variable_shape(
forward_input, name="shape_init")
else:
shape = array_ops.shape(forward_input, name="shape_init")
return indexed_slices.IndexedSlices(
values=values, indices=indices, dense_shape=shape)
def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices):
"""Creates a new version of old_output_slices with new_input_slices as input.
This method assumes that old_output_slices.{values,indices} are produced by
concatenating the incoming gradient Tensor input with the IndexedSlices
produced by the gradient computation of the while body. See
backprop.aggregate_indexed_slices_gradients for where these concats are
constructed. We build new concats that use new_input_slices instead of the
original Tensor input.
Args:
old_output_slices: original IndexedSlices output of while gradient.
new_input_slices: new IndexedSlices to use as input to while gradient.
Returns:
A new IndexedSlices to replace old_output_slices.
"""
def rewrite(old_output, new_input):
assert old_output.type == "Identity"
concat_op = old_output.inputs[0].op
assert concat_op.type == "ConcatV2"
# Don't include axis arg
old_concat_args = concat_op.inputs[:-1]
# We assume that the original gradient input was the first argument to the
# concat op.
# TODO(skyewm): do this in a more robust way.
return array_ops.concat([new_input] + old_concat_args[1:], 0)
values = rewrite(old_output_slices.values.op, new_input_slices.values)
indices = rewrite(old_output_slices.indices.op, new_input_slices.indices)
return indexed_slices.IndexedSlices(
values=values, indices=indices, dense_shape=new_input_slices.dense_shape)
def _update_indexed_slices_param(graph, loop_vars, init_slices, input_slices,
output_slices, old_output_slices):
"""Updates graph with new IndexedSlices input/output.
Updates graph's metadata to output the gradient computation defined by
init_slices, input_slices, and output_slices, instead of outputting
old_output_slices. Also returns a new version of loop_vars with init_slices
replacing the old input.
Args:
graph: _WhileBodyGradFuncGraph.
loop_vars: the inputs to graph.
init_slices: the new IndexedSlices to use as input to graph.
input_slices: the new IndexedSlices in graph that should be fed by
init_slices.
output_slices: the new IndexedSlices in graph that should be the
corresponding output to input_slices.
old_output_slices: the IndexedSlices in graph that are currently being
output.
Returns:
New loop_vars to pass to graph.
"""
structured_idx = _get_tensor_index_in_iterable(graph.structured_outputs,
old_output_slices)
# We assume that the component tensors of old_output_slices appear
# sequentially in graph.outputs. We use the first of these tensors
# as the reference index.
flat_idx = _get_tensor_index_in_iterable(
graph.outputs,
func_graph.flatten(old_output_slices)[0])
graph.structured_outputs[structured_idx] = output_slices
graph.outputs = func_graph.flatten(graph.structured_outputs)
graph.inputs = (
graph.inputs[:flat_idx] + _flatten(input_slices) +
graph.inputs[flat_idx + 1:])
return loop_vars[:flat_idx] + _flatten(init_slices) + loop_vars[flat_idx + 1:]
def _flatten(arg):
return nest.flatten(arg, expand_composites=True)
|
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as pl
import pyiacsun as ps
from ipdb import set_trace as stop
import scipy.linalg as sl
import scipy.special as sp
import scipy.optimize as op
import scipy.io as io
import waveletTrans as wl
import seaborn as sn
def softThrShifted(a, b, lambd):
return np.sign(a) * np.fmax(np.abs(a + b) - lambd, 0) - b
def softThr(x, lambdaPar, lower=None, upper=None):
out = np.sign(x) * np.fmax(np.abs(x) - lambdaPar, 0)
    if (lower is not None):
        out[out < lower] = 0.0
    if (upper is not None):
        out[out > upper] = 0.0
return out
def hardThr(x, lambdaPar, lower=None, upper=None):
out = np.copy(x)
out[np.abs(x) < lambdaPar] = 0.0
    if (lower is not None):
        out[out < lower] = 0.0
    if (upper is not None):
        out[out > upper] = 0.0
return out
def upLimitThr(x, top):
x[x > top] = 0.0
return x
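# Quick illustrative check (not part of the original script): soft thresholding
# shrinks every value toward zero by lambdaPar, while hard thresholding zeroes
# the small values and leaves the rest untouched. Uses only numpy from the
# imports above; the sample vector is arbitrary.
_thr_example = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
assert np.allclose(softThr(_thr_example, 1.0), [-1.0, 0.0, 0.0, 0.0, 1.0])
assert np.allclose(hardThr(_thr_example, 1.0), [-2.0, 0.0, 0.0, 0.0, 2.0])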
class inversionLTE(object):
def __init__(self, wavelet='wavelet', family='db4', lambdaL1=None, innerIterations=None):
self.wavelet = wavelet
        if (innerIterations is None):
            self.innerIterations = 100
        else:
            self.innerIterations = innerIterations
np.random.seed(10)
obs = np.load('profiles/singleProfile.npy')
        # Normalize continuum
        x = [7, 44, 190, 216, 242, 244, 286]
y = obs[1,x]
coeff = np.polyfit(x, y, 4)
cont = np.polyval(coeff, np.arange(len(obs[1,:])))
obs[1:,:] /= cont[None,:]
# obs = obs[:,45:173]
obs = obs[:,0:256]
lowerMask = [35]
upperMask = [192]
maskChi2 = []
for i in range(len(lowerMask)):
maskChi2.append(np.arange(upperMask[i] - lowerMask[i]+1) + lowerMask[i])
self.maskChi2 = np.hstack(maskChi2)
self.wavelength = obs[0,self.maskChi2]
self.fullWavelength = obs[0,:]
self.contHSRA = ps.util.contHSRA(np.mean(self.wavelength))
self.obs = obs[1:,self.maskChi2]
self.nLambda = self.wavelength.shape[0]
self.nLambdaTotal = obs[0,:].shape[0]
atmos = np.loadtxt('hsra_64.model', skiprows=2)
lines = np.loadtxt('lines.dat')
self.referenceAtmos = atmos
ps.radtran.initLTENodes(self.referenceAtmos, lines, self.wavelength)
self.noise = 0.01
self.lambdaL1 = lambdaL1
# Define number of nodes and set their ranges
self.nNodes = [5,1,3,0,0,0]
self.nNodesTotal = np.sum(self.nNodes)
self.nUnknowns = self.nNodesTotal
lower = [-2000.0, 0.01, -7.0, 0.0, 0.0, 0.0]
upper = [2000.0, 5.0, 5.0, 3000.0, 180.0, 180.0]
initial = [0.0, 1.0, 0.0, 0.01, 20.0, 20.0]
self.lower = []
self.upper = []
self.initial = []
for i in range(6):
self.lower.append([lower[i]]*self.nNodes[i])
self.upper.append([upper[i]]*self.nNodes[i])
self.initial.append([initial[i]]*self.nNodes[i])
self.lower = np.hstack(self.lower)
self.upper = np.hstack(self.upper)
self.initial = np.hstack(self.initial)
self.nodes = []
for n in self.nNodes:
temp = []
for i in range(n):
temp.append(0)
self.nodes.append(temp)
self.nodePositions = ps.radtran.nodePositions(self.referenceAtmos[:,0],self.nodes)
self.weights = np.asarray([1.0,0.0,0.0,0.0])
self.factor = 1.0 / (self.nLambda * self.noise**2)
self.family = family
if (wavelet == 'wavelet'):
self.wavedec, self.waverec = wl.daubechies_factory((self.nLambda), family)
self.nLevelsIUWT = 6
def logit(self, x):
"""
Logit function
Args:
x (TYPE): x
Returns:
TYPE: transformed x
"""
return np.log(x / (1.0 - x))
def invLogit(self, x):
"""
Inverse logit function
Args:
x (TYPE): x
Returns:
TYPE: transformed x
"""
return 1.0 / (1.0 + np.exp(-x))
def physicalToTransformed(self, x):
"""
Transform from physical parameters to transformed (unconstrained) ones
Args:
x (TYPE): vector of parameters
Returns:
TYPE: transformed vector of parameters
"""
return self.logit( (x-self.lower) / (self.upper - self.lower))
def transformedToPhysical(self, x):
"""
Transform from transformed (unconstrained) parameters to physical ones
Args:
x (TYPE): vector of transformed parameters
Returns:
TYPE: vector of parameters
"""
return self.lower + (self.upper - self.lower) * self.invLogit(x)
def dtransformedToPhysical(self, x):
"""
Transform from transformed (unconstrained) parameters to physical ones
Args:
x (TYPE): vector of transformed parameters
Returns:
TYPE: vector of parameters
"""
return (self.upper - self.lower) * np.exp(-x) * self.invLogit(x)**2
def jacobianTransformedParameters(self, x):
"""
Compute the Jacobian of the transformation from unconstrained parameters to physical parameters
Args:
x (TYPE): vector of parameters
Returns:
TYPE: transformed vector of parameters
"""
temp = self.invLogit(x)
return (self.upper - self.lower) * temp * (1.0 - temp)
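    # Consistency note (not from the original code): since
    # invLogit(x) * (1 - invLogit(x)) == exp(-x) * invLogit(x)**2, this method
    # computes the same quantity as dtransformedToPhysical above; both are the
    # derivative d(physical)/d(transformed) used to chain the response
    # functions through the logit re-parametrization.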
def vector2Nodes(self, vector):
"""
Transform from a vector of parameters to the structure of nodes, made of lists of lists
Args:
vector (float): model parameters
Returns:
TYPE: structure of nodes
"""
nodes = []
loop = 0
for n in self.nNodes:
temp = []
for i in range(n):
temp.append(vector[loop])
loop += 1
nodes.append(temp)
return nodes
def nodes2Vector(self, nodes):
"""Summary
Args:
nodes (TYPE): Description
Returns:
TYPE: Description
"""
return np.asarray([item for sublist in nodes for item in sublist])
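    # Illustrative example (not part of the original class): with the node
    # counts defined above, nNodes = [5, 1, 3, 0, 0, 0], a 9-element parameter
    # vector v is split as
    #
    #   vector2Nodes(v) -> [[v0..v4], [v5], [v6, v7, v8], [], [], []]
    #
    # and nodes2Vector is its inverse, flattening the node lists back into a
    # single 1-D array.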
def computeFunctionAndGradient(self, xLTE, xSys):
"""
Compute the value of the merit function and of the gradient of the merit function with respect to the
temperature
"""
xPhysical = self.transformedToPhysical(xLTE)
nodes = self.vector2Nodes(xPhysical)
stokes, cont, atmosNew, dStokes = ps.radtran.synthLTENodes(self.referenceAtmos, nodes, responseFunction=True)
stokes /= self.contHSRA
dStokes = self.nodes2Vector(dStokes)
dStokes /= self.contHSRA
# Take into account the Jacobian of the transformation
dStokes *= self.jacobianTransformedParameters(xLTE)[:,None,None]
residual = (self.obs - (stokes + xSys))
chi2 = np.sum(self.weights[:,None] * residual**2 * self.factor)
chi2NoWeight = np.sum(residual**2 * self.factor)
dChi2LTE = -2.0 * np.sum(self.weights[None,:,None] * dStokes * residual[None,:,:] * self.factor, axis=(1,2))
ddStokes = dStokes[None,:,:,:] * dStokes[:,None,:,:]
ddChi2LTE = 2.0 * np.sum(self.weights[None,None,:,None] * ddStokes * self.factor, axis=(2,3))
return chi2, chi2NoWeight, dChi2LTE, ddChi2LTE, stokes
def meritFunction(self, xLTE, xSys):
"""
Compute the value of the merit function for Milne-Eddington parameters and given systematics
Args:
xLTE (TYPE): Description
xSys (TYPE): systematics parameters
Deleted Args:
xMilne (TYPE): Milne-Eddington parameters
"""
xPhysical = self.transformedToPhysical(xLTE)
nodes = self.vector2Nodes(xPhysical)
stokes, cont, atmosNew = ps.radtran.synthLTENodes(self.referenceAtmos, nodes)
stokes /= self.contHSRA
sys = np.zeros_like(stokes)
sys[0,:] = xSys
residual = (self.obs - (stokes + sys))
return np.sum(self.weights[:,None] * residual**2 * self.factor), np.sum(residual**2 * self.factor), stokes
def printNodes(self, xLTE):
xPhysical = self.transformedToPhysical(xLTE)
nodes = self.vector2Nodes(xPhysical)
variable = ['T', 'vmic', 'vmac', 'B', 'thetaB', 'phiB']
for i, n in enumerate(nodes):
if (len(n) != 0):
print(" {0} : {1}".format(variable[i], n))
def forwardIUWT(self, x):
"""
Forward IUWT transform
"""
dummy = ps.sparse.iuwt_decomposition(x, self.nLevelsIUWT, 0, True)
return np.vstack((dummy[0],dummy[1][None,:,:]))
def backwardIUWT(self, x):
"""
Backward IUWT transform
"""
detail = x[0:-1,:,:]
smooth = x[-1,:,:]
return ps.sparse.iuwt_recomposition(detail, 0, smooth)
def thresholdIUWT(self, x, thr):
out = np.copy(x)
smooth = x[-1,:,:]
detail = x[0:-1,:,:]
smooth -= smooth[0,0]
out[-1,:,:] = smooth
out[0:-1,:,:] = softThr(detail, thr)
# stop()
return out
def optimize(self, acceleration=True, plot=False, fileExtension=None):
"""
This solves the inversion problem by using the FISTA algorithm
"""
x = np.zeros(self.nUnknowns+self.nLambda)
x[0:self.nUnknowns] = self.physicalToTransformed(self.initial)
chi2 = 1e10
chi2Old = 1e20
relchi2 = np.abs((chi2 - chi2Old) / chi2Old)
xnew = np.copy(x)
loop = 0
loopInt = 0
lambdaLM = 1e-3
chi2Best = 1e10
chi2Old = 1e10
nWorstChi2 = 0
# dChi2Old = 0
self.chi2 = []
self.l0 = []
self.l1 = []
while ((relchi2 > 1e-6) & (loop < 20) & (nWorstChi2 < 8)):
chi2, chi2NW, dChi2, ddChi2, stokes = self.computeFunctionAndGradient(x[0:self.nUnknowns], x[self.nUnknowns:])
chi2Old = np.copy(chi2)
H = 0.5 * ddChi2
H += np.diag(lambdaLM * np.diag(H))
gradF = 0.5 * dChi2
# First deal with the Hazel part
U, w, VT = np.linalg.svd(H[0:self.nUnknowns,0:self.nUnknowns], full_matrices=True)
wmax = np.max(w)
wInv = 1.0 / w
wInv[w < 1e-6*wmax] = 0.0
# xnew = xold - H^-1 * grad F
deltaxnew = -VT.T.dot(np.diag(wInv)).dot(U.T).dot(gradF[0:self.nUnknowns])
xnew[0:self.nUnknowns] = x[0:self.nUnknowns] + deltaxnew
chi2, chi2NW, stokes = self.meritFunction(xnew[0:self.nUnknowns], xnew[self.nUnknowns:])
if ((loop + 1) % 5 == 0):
thr = self.lambdaL1
thr = self.lambdaL1 + 500.*self.lambdaL1*np.exp(-loop)
print(thr)
if (self.wavelet == 'iuwt'):
tmp = (self.obs[0,:] - stokes[0,:])[:,None]
if (self.innerIterations == 1):
res = ps.sparse.iuwt_decomposition(tmp, self.nLevelsIUWT, 0, True)
res[0][np.abs(res[0]) < thr] = 0.0
xnew[self.nUnknowns:] = ps.sparse.iuwt_recomposition(res[0], 0, res[1])[:,0]
else:
xnew[self.nUnknowns:] = ps.sparse.proxes.prox_l1General(tmp, self.forwardIUWT, self.backwardIUWT,
thr, threshold=self.thresholdIUWT, verbose=False)[:,0]
if (self.wavelet == 'wavelet'):
xnew[self.nUnknowns:] = ps.sparse.proxes.prox_l1General(self.obs[0,:] - stokes[0,:], self.wavedec, self.waverec, thr, threshold='hard', verbose=False)
xnew[self.nUnknowns:] /= (1.0+xnew[self.nUnknowns])
xnew[self.nUnknowns:] = upLimitThr(xnew[self.nUnknowns:], 0.0)
#x = np.copy(xnew)
chi2, chi2NW, stokes = self.meritFunction(xnew[0:self.nUnknowns], xnew[self.nUnknowns:])
if (chi2NW < chi2Best):
if (lambdaLM >= 1e4):
lambdaLM /= 100.0
                elif ((lambdaLM >= 1e-4) and (lambdaLM < 1e4)):
                    lambdaLM /= 10.0
                elif (lambdaLM < 1e-4):
lambdaLM /= 5.0
if (lambdaLM < 1e-6):
lambdaLM = 1e-6
chi2Best = np.copy(chi2NW)
x = np.copy(xnew)
nWorstChi2 = 0
else:
if (lambdaLM > 1e4):
lambdaLM *= 100.0
                elif ((lambdaLM >= 1e-4) and (lambdaLM < 1e4)):
                    lambdaLM *= 10.0
                elif (lambdaLM < 1e-4):
lambdaLM *= 5.0
nWorstChi2 += 1
relchi2 = np.abs((chi2 - chi2Old) / chi2Old)
l1Norm = np.linalg.norm(x[self.nUnknowns:], 1)
if (self.wavelet == 'iuwt'):
tmp = self.forwardIUWT(xnew[self.nUnknowns:][:,None])
l0Norm = np.sum(np.abs(tmp) > 1e-6)
if (self.wavelet == 'wavelet'):
l0Norm = np.sum(np.abs(self.wavedec(x[self.nUnknowns:])) > 1e-10)
print("Iteration {0} - chi2={1:10.4f} - l1={2} - l0={3} - relchi2={4} - lambda={5}".format(loop, chi2NW, l1Norm, l0Norm, relchi2, lambdaLM))
self.printNodes(x[0:self.nUnknowns])
self.chi2.append(chi2NW)
self.l0.append(l0Norm)
self.l1.append(l1Norm)
loop += 1
xPhysical = self.transformedToPhysical(x[0:self.nNodesTotal])
nodes = self.vector2Nodes(xPhysical)
stokes, cont, atmosNew = ps.radtran.synthLTENodes(self.referenceAtmos, nodes)
stokes /= self.contHSRA
sys = x[self.nUnknowns:]
np.savez( "results/lte_{0}_lambda_{1}_inner_{2}.npz".format(self.wavelet,fileExtension,self.innerIterations), self.obs, stokes, sys, self.chi2, x, self.wavelength,
self.l1, self.l0, self.maskChi2)
# pl.close('all')
# f, ax = pl.subplots(nrows=2, ncols=2, figsize=(12,9))
# ax = ax.flatten()
# labelStokes = ['I/Ic','Q/Ic','U/Ic','V/Ic']
# ax[0].plot(self.obs[0,:], label='obs')
# ax[0].plot(stokes[0,:] + sys, label='stokes+sys')
# ax[0].plot(1.0+sys, label='sys')
# ax[0].plot(stokes[0,:], label='stokes')
# ax[0].legend()
# ax[1].plot(self.obs[0,:] / (1.0+sys))
# ax[1].plot(stokes[0,:])
# pl.tight_layout()
if (plot):
pl.savefig('/scratch/Dropbox/CONGRESOS/2015/Hinode9/code/systematicsExampleWithFit.png')
print("--------")
print("l1 norm of systematics : {0}".format(np.linalg.norm(x[self.nUnknowns:], 1)))
return x
lambdas = [0.001,0.005,0.01,0.05]
for l in lambdas:
out = inversionLTE(wavelet='iuwt', lambdaL1=l)
res = out.optimize(acceleration=True, plot=False, fileExtension=l)
lambdas = [0.001,0.005,0.01,0.05]
for l in lambdas:
out = inversionLTE(wavelet='iuwt', lambdaL1=l, innerIterations=1)
res = out.optimize(acceleration=True, plot=False, fileExtension=l)
# lambdas = [1e-3,1e-2,1e-1,1.0]
# for l in lambdas:
# out = inversionWavelet(wavelet='wavelet', family='db8', lambdaL1=l)
# res = out.optimize(acceleration=True, plot=False, fileExtension=l)
|
|
###############################################################################
# The MIT License (MIT)
# Copyright (c) 2007-2016 Roman Rodyakin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
from buildsupport.compilers import Compiler
from buildsupport.PlatformDetection import getTargetOsName
class Clang(Compiler):
"""
A clang-specific compiler instance. See Compilation.py and Linking.py for
more.
(Note that this was originally copied from Gcc.py, so there might still be
some gcc-specific logic below that doesn't work too well with Clang.)
"""
def __init__(self, executable, version):
Compiler.__init__(self, "clang", executable, version)
self._versionList = self._version.split('.')
assert len(version) > 1
try:
# If this is not flexible enough (i.e. versions like 3.2.3a are
# possible), change this to individual assignments. We currently
# depend on self._version[0] being a number
self._versionList = list(map(int, self._versionList))
except:
assert False
def getCompilerArgv(self,
sourceFilePath,
outputFilePath,
defines,
headerDirs,
targetType,
exportedSharedApis,
importedSharedApis,
cppStandard,
disableWarnings,
disableThreading,
optimize):
"""
See the similar function in gcc.py for the explanation of what it does.
"""
assert sourceFilePath and isinstance(sourceFilePath, str)
assert outputFilePath and isinstance(outputFilePath, str)
assert targetType in ["sharedlib", "staticlib", "executable"]
result = [self._executable, "-c"]
if targetType == "sharedlib":
if getTargetOsName() == "darwin":
result.append("-dynamic")
else:
# See the GCC manpage for more on this. It essentially says
# that on m88k, Sparc, m68k and on RS/6000 you might want to try
# -fpic first instead. This is not currently a practical issue,
# fix when it becomes one.
result.append("-fPIC")
if not disableThreading and getTargetOsName() != "darwin":
result.append("-pthread")
result.append("-std=%s" % cppStandard)
result.append("-Xclang")
result.append("-fcolor-diagnostics")
if disableWarnings:
result.append("-w")
else:
result.append("-Wall")
result.append("-Wno-sign-compare")
result.append("-Wno-unused-value") # TODO: introduced to accommodate HowardHinnant's date, see if still required in newer versions
if optimize:
result.append("-O3")
result.append("-flto")
else:
result.append("-g")
result += ["-I%s" % dir for dir in headerDirs]
for symbol in defines:
if defines[symbol] is None:
result.append("-D%s" % symbol)
else:
result.append("-D%s=%s" % (symbol, defines[symbol]))
result.append("-fvisibility=hidden")
sharedLibExport = '-D%s_API=__attribute__((visibility("default")))'
sharedLibImport = '-D%s_API=__attribute__((visibility("hidden")))'
sharedLibTypeInfoExport = '-D%s_TYPEINFO_API=__attribute__((visibility("default")))'
sharedLibTypeInfoImport = '-D%s_TYPEINFO_API=__attribute__((visibility("default")))'
if exportedSharedApis:
for api in exportedSharedApis:
result.append(sharedLibExport % api.upper())
result.append(sharedLibTypeInfoExport % api.upper())
if importedSharedApis:
for api in importedSharedApis:
result.append(sharedLibImport % api.upper())
result.append(sharedLibTypeInfoImport % api.upper())
result.append("-o")
result.append(outputFilePath)
result.append(sourceFilePath)
return result
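    # Illustrative result (assumed inputs, not from this module): compiling a
    # single file for an executable target on Linux with cppStandard="c++14",
    # headerDirs=["include"], defines={}, optimize=False and both warnings and
    # threading enabled yields roughly
    #
    #   [<executable>, "-c", "-pthread", "-std=c++14", "-Xclang",
    #    "-fcolor-diagnostics", "-Wall", "-Wno-sign-compare",
    #    "-Wno-unused-value", "-g", "-Iinclude", "-fvisibility=hidden",
    #    "-o", <outputFilePath>, <sourceFilePath>]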
def getSharedLinkerArgv(self,
sources,
output,
major,
minor,
soname,
libDirs,
dependencies,
optimize,
disableThreading):
"""
See the similar function in gcc.py for the explanation of what it does.
"""
result = [self._executable]
if getTargetOsName() == "darwin":
result += ["-dynamiclib"]
result += ["-current_version", "%s.%s" % (major, minor)]
result += ["-compatibility_version", "%s.0" % major]
else:
result += ["-shared"]
result += ["-Wl,-soname,%s" % soname]
if optimize:
result.append("-O3")
result.append("-flto")
if not disableThreading and getTargetOsName() != "darwin":
result.append("-pthread")
for libdir in libDirs:
result.append("-L%s" % libdir)
for dependency in dependencies:
result.append("-l%s" % dependency)
result.append("-o")
result.append(output)
result += sources
return result
def getExecutableLinkerArgv(self,
sources,
output,
libDirs,
dependencies,
optimize,
disableThreading):
"""
See the similar function in gcc.py for the explanation of what it does.
"""
result = [self._executable]
if optimize:
result.append("-O3")
result.append("-flto")
if not disableThreading and getTargetOsName() != "darwin":
result.append("-pthread")
for libdir in libDirs:
result.append("-L%s" % libdir)
for dependency in dependencies:
result.append("-l%s" % dependency)
result.append("-o")
result.append(output)
result += sources
return result
|
|
from unittest import TestCase
from graphql.ast import Argument, Document, Field, FragmentDefinition, \
FragmentSpread, Mutation, NamedType, NonNullType, Query, Subscription, \
Variable, VariableDefinition
from graphql.parser import GraphQLParser
class GraphQLParseTest(TestCase):
parser = GraphQLParser()
def test_shorthand(self):
self.assertEqual(
self.parser.parse('{ me { name } }'),
Document(definitions=[
Query(selections=[
Field(selections=[Field(name='name')], name='me')
])
])
)
self.assertEqual(
self.parser.parse("""
{
user(id: 4) {
id
name
profilePic
avatar: profilePic(width: 30, height: 30)
}
}
"""),
Document(definitions=[
Query(selections=[
Field(
selections=[
Field(name='id'),
Field(name='name'),
Field(name='profilePic'),
Field(alias='avatar', name='profilePic', arguments=[
Argument(name='width', value=30),
Argument(name='height', value=30)
])
],
name='user',
arguments=[Argument(name='id', value=4)]
)
])
])
)
def test_mutation_shorthand(self):
self.assertEqual(
self.parser.parse("""
mutation {
likeStory(storyID: 12345) {
story {
likeCount
}
}
}
"""),
Document(definitions=[
Mutation(selections=[
Field(
selections=[
Field(name='story', selections=[
Field(name='likeCount')
]),
],
name='likeStory',
arguments=[Argument(name='storyID', value=12345)]
)
])
])
)
def test_with_fragments(self):
self.assertEqual(
self.parser.parse("""
query withNestedFragments {
user(id: 4) {
friends(first: 10) {
...friendFields
}
mutualFriends(first: 10) {
...friendFields
}
}
}
fragment friendFields on User {
id
name
...standardProfilePic
}
fragment standardProfilePic on User {
profilePic(size: "small")
}
"""),
Document(definitions=[
Query(name='withNestedFragments',
selections=[
Field(selections=[
Field(selections=[
FragmentSpread(name='friendFields')
],
name='friends',
arguments=[
Argument(name='first', value=10)
]),
Field(selections=[
FragmentSpread(name='friendFields')
],
name='mutualFriends',
arguments=[
Argument(name='first', value=10)
])
],
name='user',
arguments=[Argument(name='id', value=4)])
]
),
FragmentDefinition(type_condition=NamedType(name='User'),
name='friendFields',
selections=[
Field(name='id'),
Field(name='name'),
FragmentSpread(name='standardProfilePic')
]
),
FragmentDefinition(type_condition=NamedType(name='User'),
name='standardProfilePic',
selections=[
Field(name='profilePic',
arguments=[Argument(name='size', value='small')]
)
])
])
)
def test_shorthand_query_with_fragments(self):
self.assertEqual(
self.parser.parse("""
{
hero {
name
...DroidFields
}
}
fragment DroidFields on Droid {
primaryFunction
}
"""),
Document(definitions=[
Query(selections=[
Field(
name='hero',
selections=[
Field(name='name'),
FragmentSpread(name='DroidFields'),
]
),
]),
FragmentDefinition(type_condition=NamedType(name='Droid'),
name='DroidFields',
selections=[Field(name='primaryFunction')]
),
])
)
def test_shorthand_vs_query(self):
self.assertEqual(
self.parser.parse("""
query {
hero {
name
}
}
"""),
self.parser.parse("""
{
hero {
name
}
}
"""),
)
def test_variables(self):
self.assertEqual(
self.parser.parse("""
query withVariable($userId: Int = 0, $userName: String) {
user(id: $userId, name: $userName) {
nick
}
}
"""),
Document(definitions=[Query(
name='withVariable',
variable_definitions=[VariableDefinition(
name='userId',
type=NamedType(name='Int'),
default_value=0
), VariableDefinition(
name='userName',
type=NamedType(name='String')
)],
selections=[Field(
selections=[Field(name='nick')],
name='user',
arguments=[Argument(
name='id',
value=Variable(name='userId'),
), Argument(
name='name',
value=Variable(name='userName')
)]
)])
])
)
def test_arguments(self):
self.assertEqual(
self.parser.parse("""
{
episodes (number: null, isPrequel: false) {
id
}
}
"""),
Document(definitions=[Query(
selections=[Field(
selections=[Field(name='id')],
name='episodes',
arguments=[Argument(
name='number',
value=None
), Argument(
name='isPrequel',
value=False
)]
)])
])
)
def test_with_subscription(self):
self.assertEqual(
self.parser.parse("""
subscription onSomething($deviceId: ID!) {
onSomething(deviceId: $deviceId,) {
deviceId
deviceType
datapoints {
id
}
}
}
"""),
Document(definitions=[
Subscription(
name="onSomething",
selections=[
Field(
name="onSomething",
arguments=[Argument(
name="deviceId",
value=Variable(
name="deviceId"
)
)],
selections=[
Field(
name="deviceId"
),
Field(
name="deviceType"
),
Field(
name="datapoints",
selections=[
Field(name="id")
]
)
]
)
],
variable_definitions=[
VariableDefinition(
name="deviceId",
type=NonNullType(
type=NamedType(
name="ID"
)
)
)
]
)
])
)
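# Usage sketch (not part of the test suite): the parser returns a plain AST
# whose nodes can be inspected directly, mirroring the expected values above:
#
#   doc = GraphQLParser().parse('{ me { name } }')
#   query = doc.definitions[0]        # a Query node
#   field = query.selections[0]       # Field(name='me')
#   field.selections[0].name          # 'name'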
|
|
# Licensed under an MIT open source license - see LICENSE
import numpy as np
import scipy.ndimage as nd
from length import *
import matplotlib.pyplot as p
import copy
def isolateregions(binary_array, size_threshold=0, pad_size=5,
fill_hole=False, rel_size=0.1, morph_smooth=False):
'''
Labels regions in a boolean array and returns individual arrays for each
    region. Regions below a threshold can optionally be removed. Small holes
may also be filled in.
Parameters
----------
binary_array : numpy.ndarray
A binary array of regions.
    size_threshold : int, optional
        Removes regions containing this many pixels or fewer.
pad_size : int, optional
Padding to be added to the individual arrays.
    fill_hole : bool, optional
        Enables hole filling.
rel_size : float or int, optional
If < 1.0, sets the minimum size a hole must be relative to the area
of the mask. Otherwise, this is the maximum number of pixels the hole
must have to be deleted.
morph_smooth : bool, optional
        Morphologically smooth the image using a binary opening and closing.
Returns
-------
output_arrays : list
Regions separated into individual arrays.
num : int
Number of filaments
corners : list
Contains the indices where each skeleton array was taken from
the original.
'''
output_arrays = []
corners = []
# Label skeletons
labels, num = nd.label(binary_array, eight_con())
# Remove skeletons which have less pixels than the threshold.
if size_threshold != 0:
sums = nd.sum(binary_array, labels, range(1, num + 1))
remove_fils = np.where(sums <= size_threshold)[0]
for lab in remove_fils:
binary_array[np.where(labels == lab + 1)] = 0
# Relabel after deleting short skeletons.
labels, num = nd.label(binary_array, eight_con())
# Split each skeleton into its own array.
for n in range(1, num + 1):
x, y = np.where(labels == n)
# Make an array shaped to the skeletons size and padded on each edge
shapes = (x.max() - x.min() + 2 * pad_size,
y.max() - y.min() + 2 * pad_size)
eachfil = np.zeros(shapes)
eachfil[x - x.min() + pad_size, y - y.min() + pad_size] = 1
# Fill in small holes
if fill_hole:
eachfil = _fix_small_holes(eachfil, rel_size=rel_size)
if morph_smooth:
eachfil = nd.binary_opening(eachfil, np.ones((3, 3)))
eachfil = nd.binary_closing(eachfil, np.ones((3, 3)))
output_arrays.append(eachfil)
# Keep the coordinates from the original image
lower = (x.min() - pad_size, y.min() - pad_size)
upper = (x.max() + pad_size + 1, y.max() + pad_size + 1)
corners.append([lower, upper])
return output_arrays, num, corners
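# Usage sketch (assumed inputs, not from this module):
#
#   mask = skeleton_image > 0
#   arrays, num, corners = isolateregions(mask, size_threshold=10, pad_size=5)
#
# arrays[i] is a padded cut-out containing only the i-th region, and
# corners[i] = [lower, upper] gives the pixel offsets needed to place that
# cut-out back into the original image.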
def find_filpix(branches, labelfil, final=True):
'''
Identifies the types of pixels in the given skeletons. Identification is
based on the connectivity of the pixel.
Parameters
----------
branches : list
Contains the number of branches in each skeleton.
labelfil : list
Contains the arrays of each skeleton.
final : bool, optional
If true, corner points, intersections, and body points are all
labeled as a body point for use when the skeletons have already
been cleaned.
Returns
-------
fila_pts : list
All points on the body of each skeleton.
inters : list
All points associated with an intersection in each skeleton.
labelfil : list
Contains the arrays of each skeleton where all intersections
have been removed.
endpts_return : list
The end points of each branch of each skeleton.
'''
initslices = []
initlist = []
shiftlist = []
sublist = []
endpts = []
blockpts = []
bodypts = []
slices = []
vallist = []
shiftvallist = []
cornerpts = []
subvallist = []
subslist = []
pix = []
filpix = []
intertemps = []
fila_pts = []
inters = []
repeat = []
temp_group = []
all_pts = []
pairs = []
endpts_return = []
for k in range(1, branches + 1):
x, y = np.where(labelfil == k)
# pixel_slices = np.empty((len(x)+1,8))
for i in range(len(x)):
if x[i] < labelfil.shape[0] - 1 and y[i] < labelfil.shape[1] - 1:
pix.append((x[i], y[i]))
initslices.append(np.array([[labelfil[x[i] - 1, y[i] + 1],
labelfil[x[i], y[i] + 1],
labelfil[x[i] + 1, y[i] + 1]],
[labelfil[x[i] - 1, y[i]], 0,
labelfil[x[i] + 1, y[i]]],
[labelfil[x[i] - 1, y[i] - 1],
labelfil[x[i], y[i] - 1],
labelfil[x[i] + 1, y[i] - 1]]]))
filpix.append(pix)
slices.append(initslices)
initslices = []
pix = []
for i in range(len(slices)):
for k in range(len(slices[i])):
initlist.append([slices[i][k][0, 0],
slices[i][k][0, 1],
slices[i][k][0, 2],
slices[i][k][1, 2],
slices[i][k][2, 2],
slices[i][k][2, 1],
slices[i][k][2, 0],
slices[i][k][1, 0]])
vallist.append(initlist)
initlist = []
for i in range(len(slices)):
for k in range(len(slices[i])):
shiftlist.append(shifter(vallist[i][k], 1))
shiftvallist.append(shiftlist)
shiftlist = []
for k in range(len(slices)):
for i in range(len(vallist[k])):
for j in range(8):
sublist.append(
int(vallist[k][i][j]) - int(shiftvallist[k][i][j]))
subslist.append(sublist)
sublist = []
subvallist.append(subslist)
subslist = []
# x represents the subtracted list (step-ups) and y is the values of the
# surrounding pixels. The categories of pixels are ENDPTS (x<=1),
# BODYPTS (x=2,y=2),CORNERPTS (x=2,y=3),BLOCKPTS (x=3,y>=4), and
# INTERPTS (x>=3).
    # A cornerpt (marked * below) is a point associated with an intersection
    # but excluded from it so that eight-connectivity is preserved; cornerpts
    # are therefore counted with the intersections. The local pattern is:
    #   [*, 0, 0]
    #   [1, *, 0]
    #   [0, 1, 0]
    # A blockpt has the pattern below; blockpts are typically found in groups
    # of four, where all four together constitute a single intersection:
    #   [1, 0, 1]
    #   [0, *, *]
    #   [1, *, *]
# The "final" designation is used when finding the final branch lengths.
# At this point, blockpts and cornerpts should be eliminated.
for k in range(branches):
for l in range(len(filpix[k])):
x = [j for j, y in enumerate(subvallist[k][l]) if y == k + 1]
y = [j for j, z in enumerate(vallist[k][l]) if z == k + 1]
if len(x) <= 1:
endpts.append(filpix[k][l])
endpts_return.append(filpix[k][l])
elif len(x) == 2:
if final:
bodypts.append(filpix[k][l])
else:
if len(y) == 2:
bodypts.append(filpix[k][l])
elif len(y) == 3:
cornerpts.append(filpix[k][l])
elif len(y) >= 4:
blockpts.append(filpix[k][l])
elif len(x) >= 3:
intertemps.append(filpix[k][l])
endpts = list(set(endpts))
bodypts = list(set(bodypts))
dups = set(endpts) & set(bodypts)
if len(dups) > 0:
for i in dups:
bodypts.remove(i)
# Cornerpts without a partner diagonally attached can be included as a
# bodypt.
if len(cornerpts) > 0:
deleted_cornerpts = []
        # Compare distinct pairs of corner points. zip(cornerpts, cornerpts)
        # only pairs each point with itself, so the i != j branch never ran.
        for i, j in product(cornerpts, cornerpts):
if i != j:
if distance(i[0], j[0], i[1], j[1]) == np.sqrt(2.0):
proximity = [(i[0], i[1] - 1),
(i[0], i[1] + 1),
(i[0] - 1, i[1]),
(i[0] + 1, i[1]),
(i[0] - 1, i[1] + 1),
(i[0] + 1, i[1] + 1),
(i[0] - 1, i[1] - 1),
(i[0] + 1, i[1] - 1)]
match = set(intertemps) & set(proximity)
if len(match) == 1:
pairs.append([i, j])
deleted_cornerpts.append(i)
deleted_cornerpts.append(j)
cornerpts = list(set(cornerpts).difference(set(deleted_cornerpts)))
if len(cornerpts) > 0:
for l in cornerpts:
proximity = [(l[0], l[1] - 1),
(l[0], l[1] + 1),
(l[0] - 1, l[1]),
(l[0] + 1, l[1]),
(l[0] - 1, l[1] + 1),
(l[0] + 1, l[1] + 1),
(l[0] - 1, l[1] - 1),
(l[0] + 1, l[1] - 1)]
match = set(intertemps) & set(proximity)
if len(match) == 1:
intertemps.append(l)
fila_pts.append(endpts + bodypts)
else:
fila_pts.append(endpts + bodypts + [l])
# cornerpts.remove(l)
else:
fila_pts.append(endpts + bodypts)
# Reset lists
cornerpts = []
endpts = []
bodypts = []
if len(pairs) > 0:
for i in range(len(pairs)):
for j in pairs[i]:
all_pts.append(j)
if len(blockpts) > 0:
for i in blockpts:
all_pts.append(i)
if len(intertemps) > 0:
for i in intertemps:
all_pts.append(i)
# Pairs of cornerpts, blockpts, and interpts are combined into an
# array. If there is eight connectivity between them, they are labelled
# as a single intersection.
arr = np.zeros((labelfil.shape))
for z in all_pts:
labelfil[z[0], z[1]] = 0
arr[z[0], z[1]] = 1
lab, nums = nd.label(arr, eight_con())
for k in range(1, nums + 1):
objs_pix = np.where(lab == k)
for l in range(len(objs_pix[0])):
temp_group.append((objs_pix[0][l], objs_pix[1][l]))
inters.append(temp_group)
temp_group = []
for i in range(len(inters) - 1):
if inters[i] == inters[i + 1]:
repeat.append(inters[i])
for i in repeat:
inters.remove(i)
return fila_pts, inters, labelfil, endpts_return
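# --- Illustrative sketch (not part of the original module) ------------------
# find_filpix classifies each skeleton pixel by walking its eight neighbours
# in a closed ring and counting "step-ups" (1 -> 0 transitions), i.e. the
# number of connected neighbour runs. The simplified restatement below covers
# only the endpoint / body point / intersection cases; corner and block
# points, which the full routine also distinguishes, are ignored here.
def _demo_classify_pixel(skel, i, j):
    """Classify pixel (i, j) of a binary skeleton array ``skel``.

    Assumes (i, j) is not on the array border.
    """
    ring = [skel[i - 1, j], skel[i - 1, j + 1], skel[i, j + 1],
            skel[i + 1, j + 1], skel[i + 1, j], skel[i + 1, j - 1],
            skel[i, j - 1], skel[i - 1, j - 1]]
    shifted = ring[1:] + ring[:1]
    stepups = sum(1 for a, b in zip(ring, shifted)
                  if int(a) == 1 and int(b) == 0)
    if stepups <= 1:
        return 'endpoint'
    elif stepups == 2:
        return 'body point'
    return 'intersection'
# Example: _demo_classify_pixel(np.array([[0, 0, 0],
#                                         [0, 1, 1],
#                                         [0, 0, 0]]), 1, 1) -> 'endpoint'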
def find_extran(branches, labelfil):
'''
Identify pixels that are not necessary to keep the connectivity of the
skeleton. It uses the same labeling process as find_filpix. Extraneous
pixels tend to be those from former intersections, whose attached branch
was eliminated in the cleaning process.
Parameters
----------
branches : list
Contains the number of branches in each skeleton.
labelfil : list
Contains arrays of the labeled versions of each skeleton.
Returns
-------
labelfil : list
Contains the updated labeled arrays with extraneous pieces
removed.
'''
initslices = []
initlist = []
shiftlist = []
sublist = []
extran = []
slices = []
vallist = []
shiftvallist = []
subvallist = []
subslist = []
pix = []
filpix = []
for k in range(1, branches + 1):
x, y = np.where(labelfil == k)
for i in range(len(x)):
if x[i] < labelfil.shape[0] - 1 and y[i] < labelfil.shape[1] - 1:
pix.append((x[i], y[i]))
initslices.append(np.array([[labelfil[x[i] - 1, y[i] + 1],
labelfil[x[i], y[i] + 1],
labelfil[x[i] + 1, y[i] + 1]],
[labelfil[x[i] - 1, y[i]], 0,
labelfil[x[i] + 1, y[i]]],
[labelfil[x[i] - 1, y[i] - 1],
labelfil[x[i], y[i] - 1],
labelfil[x[i] + 1, y[i] - 1]]]))
filpix.append(pix)
slices.append(initslices)
initslices = []
pix = []
for i in range(len(slices)):
for k in range(len(slices[i])):
initlist.append([slices[i][k][0, 0],
slices[i][k][0, 1],
slices[i][k][0, 2],
slices[i][k][1, 2],
slices[i][k][2, 2],
slices[i][k][2, 1],
slices[i][k][2, 0],
slices[i][k][1, 0]])
vallist.append(initlist)
initlist = []
for i in range(len(slices)):
for k in range(len(slices[i])):
shiftlist.append(shifter(vallist[i][k], 1))
shiftvallist.append(shiftlist)
shiftlist = []
for k in range(len(slices)):
for i in range(len(vallist[k])):
for j in range(8):
sublist.append(
int(vallist[k][i][j]) - int(shiftvallist[k][i][j]))
subslist.append(sublist)
sublist = []
subvallist.append(subslist)
subslist = []
for k in range(len(slices)):
for l in range(len(filpix[k])):
x = [j for j, y in enumerate(subvallist[k][l]) if y == k + 1]
y = [j for j, z in enumerate(vallist[k][l]) if z == k + 1]
if len(x) == 0:
labelfil[filpix[k][l][0], filpix[k][l][1]] = 0
if len(x) == 1:
if len(y) >= 2:
extran.append(filpix[k][l])
labelfil[filpix[k][l][0], filpix[k][l][1]] = 0
# if len(extran) >= 2:
# for i in extran:
# for j in extran:
# if i != j:
# if distance(i[0], j[0], i[1], j[1]) == np.sqrt(2.0):
# proximity = [(i[0], i[1] - 1),
# (i[0], i[1] + 1),
# (i[0] - 1, i[1]),
# (i[0] + 1, i[1]),
# (i[0] - 1, i[1] + 1),
# (i[0] + 1, i[1] + 1),
# (i[0] - 1, i[1] - 1),
# (i[0] + 1, i[1] - 1)]
# match = set(filpix[k]) & set(proximity)
# if len(match) > 0:
# for z in match:
# labelfil[z[0], z[1]] = 0
return labelfil
######################################################################
# Wrapper Functions
######################################################################
def pix_identify(isolatefilarr, num):
'''
    This function is essentially a wrapper around find_filpix. It returns the
    outputs of find_filpix in the form that is used during the analysis.
Parameters
----------
isolatefilarr : list
Contains individual arrays of each skeleton.
num : int
The number of skeletons.
Returns
-------
interpts : list
Contains lists of all intersections points in each skeleton.
hubs : list
        Contains the number of intersections in each filament. This is
        useful for identifying filaments with no intersections, as their
        analysis is straightforward.
ends : list
Contains the positions of all end points in each skeleton.
filbranches : list
Contains the number of branches in each skeleton.
labelisofil : list
Contains individual arrays for each skeleton where the
branches are labeled and the intersections have been removed.
'''
interpts = []
hubs = []
ends = []
filbranches = []
labelisofil = []
for n in range(num):
funcreturn = find_filpix(1, isolatefilarr[n], final=False)
interpts.append(funcreturn[1])
hubs.append(len(funcreturn[1]))
isolatefilarr.pop(n)
isolatefilarr.insert(n, funcreturn[2])
ends.append(funcreturn[3])
label_branch, num_branch = nd.label(isolatefilarr[n], eight_con())
filbranches.append(num_branch)
labelisofil.append(label_branch)
return interpts, hubs, ends, filbranches, labelisofil
def extremum_pts(labelisofil, extremum, ends):
'''
    This function returns the farthest extents of each filament. This
is useful for determining how well the shortest path algorithm has worked.
Parameters
----------
labelisofil : list
Contains individual arrays for each skeleton.
extremum : list
Contains the extents as determined by the shortest
path algorithm.
ends : list
        Contains the positions of each end point in each filament.
Returns
-------
extrem_pts : list
Contains the indices of the extremum points.
'''
num = len(labelisofil)
extrem_pts = []
for n in range(num):
per_fil = []
for i, j in ends[n]:
if labelisofil[n][i, j] == extremum[n][0] or labelisofil[n][i, j] == extremum[n][1]:
per_fil.append([i, j])
extrem_pts.append(per_fil)
return extrem_pts
def make_final_skeletons(labelisofil, inters, verbose=False, save_png=False,
save_name=None):
'''
Creates the final skeletons outputted by the algorithm.
Parameters
----------
labelisofil : list
List of labeled skeletons.
inters : list
Positions of the intersections in each skeleton.
verbose : bool, optional
Enables plotting of the final skeleton.
save_png : bool, optional
Saves the plot made in verbose mode. Disabled by default.
save_name : str, optional
For use when ``save_png`` is enabled.
**MUST be specified when ``save_png`` is enabled.**
Returns
-------
filament_arrays : list
List of the final skeletons.
'''
filament_arrays = []
for n, (skel_array, intersec) in enumerate(zip(labelisofil, inters)):
copy_array = np.zeros(skel_array.shape, dtype=int)
for inter in intersec:
for pts in inter:
x, y = pts
copy_array[x, y] = 1
copy_array[np.where(skel_array >= 1)] = 1
cleaned_array = find_extran(1, copy_array)
filament_arrays.append(cleaned_array)
if verbose or save_png:
if save_png and save_name is None:
Warning("Must give a save_name when save_png is enabled. No"
" plots will be created.")
p.imshow(cleaned_array, origin='lower', interpolation='nearest')
if save_png:
try_mkdir(save_name)
p.savefig(os.path.join(save_name,
save_name+"_final_skeleton_"+str(n)+".png"))
if verbose:
p.show()
p.clf()
return filament_arrays
def recombine_skeletons(skeletons, offsets, orig_size, pad_size,
verbose=False):
'''
Takes a list of skeleton arrays and combines them back into
the original array.
Parameters
----------
skeletons : list
Arrays of each skeleton.
offsets : list
Coordinates where the skeleton arrays have been sliced from the
image.
orig_size : tuple
Size of the image.
pad_size : int
Size of the array padding.
verbose : bool, optional
Enables printing when a skeleton array needs to be resized to fit
into the image.
Returns
-------
master_array : numpy.ndarray
Contains all skeletons placed in their original positions in the image.
'''
num = len(skeletons)
master_array = np.zeros(orig_size)
for n in range(num):
x_off, y_off = offsets[n][0] # These are the coordinates of the bottom
# left in the master array.
x_top, y_top = offsets[n][1]
# Now check if padding will put the array outside of the original array
# size
excess_x_top = x_top - orig_size[0]
excess_y_top = y_top - orig_size[1]
copy_skeleton = copy.copy(skeletons[n])
size_change_flag = False
if excess_x_top > 0:
copy_skeleton = copy_skeleton[:-excess_x_top, :]
size_change_flag = True
if excess_y_top > 0:
copy_skeleton = copy_skeleton[:, :-excess_y_top]
size_change_flag = True
if x_off < 0:
copy_skeleton = copy_skeleton[-x_off:, :]
x_off = 0
size_change_flag = True
if y_off < 0:
copy_skeleton = copy_skeleton[:, -y_off:]
y_off = 0
size_change_flag = True
if verbose & size_change_flag:
print "REDUCED FILAMENT %s/%s TO FIT IN ORIGINAL ARRAY" % (n, num)
x, y = np.where(copy_skeleton >= 1)
for i in range(len(x)):
master_array[x[i] + x_off, y[i] + y_off] = 1
return master_array
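# --- Illustrative sketch (not part of the original module) ------------------
# recombine_skeletons places each padded cut-out back into a blank array of
# the original image size using the corner offsets recorded when the region
# was isolated. A minimal version of that placement, with hypothetical sizes:
def _demo_place_cutout(orig_size=(10, 10), offset=(2, 3)):
    import numpy as np

    cutout = np.zeros((4, 4), dtype=int)
    cutout[1:3, 1:3] = 1                     # a small skeleton fragment
    master = np.zeros(orig_size, dtype=int)
    x, y = np.where(cutout >= 1)
    master[x + offset[0], y + offset[1]] = 1
    return master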
def _fix_small_holes(mask_array, rel_size=0.1):
'''
Helper function to remove only small holes within a masked region.
Parameters
----------
mask_array : numpy.ndarray
Array containing the masked region.
rel_size : float, optional
If < 1.0, sets the minimum size a hole must be relative to the area
of the mask. Otherwise, this is the maximum number of pixels the hole
must have to be deleted.
Returns
-------
mask_array : numpy.ndarray
Altered array.
'''
if rel_size <= 0.0:
raise ValueError("rel_size must be positive.")
elif rel_size > 1.0:
pixel_flag = True
else:
pixel_flag = False
# Find the region area
reg_area = len(np.where(mask_array == 1)[0])
# Label the holes
holes = np.logical_not(mask_array).astype(float)
lab_holes, n_holes = nd.label(holes, eight_con())
# If no holes, return
if n_holes == 1:
return mask_array
# Ignore area outside of the region.
out_label = lab_holes[0, 0]
# Set size to be just larger than the region. Thus it can never be
# deleted.
holes[np.where(lab_holes == out_label)] = reg_area + 1.
# Sum up the regions and find holes smaller than the threshold.
sums = nd.sum(holes, lab_holes, range(1, n_holes + 1))
if pixel_flag: # Use number of pixels
delete_holes = np.where(sums < rel_size)[0]
else: # Use relative size of holes.
delete_holes = np.where(sums / reg_area < rel_size)[0]
    # Return if there is nothing to delete. (delete_holes is an ndarray, so
    # check its length rather than comparing it to an empty list.)
    if len(delete_holes) == 0:
        return mask_array
    # Add one because labels start at 1 while the indices into sums start at 0.
delete_holes += 1
for label in delete_holes:
mask_array[np.where(lab_holes == label)] = 1
return mask_array
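# --- Illustrative sketch (not part of the original module) ------------------
# _fix_small_holes treats rel_size as a fraction of the mask area when it is
# <= 1.0 and as an absolute pixel count otherwise. The toy mask below has a
# 1-pixel hole (filled, since 1/54 < 0.1) and a 9-pixel hole (kept); the
# sizes and threshold are hypothetical.
def _demo_fill_small_holes(rel_size=0.1):
    import numpy as np
    import scipy.ndimage as nd

    mask = np.ones((8, 8), dtype=int)
    mask[2, 2] = 0                           # small hole
    mask[4:7, 4:7] = 0                       # larger hole
    reg_area = int((mask == 1).sum())

    holes = np.logical_not(mask).astype(int)
    lab_holes, n_holes = nd.label(holes, np.ones((3, 3)))
    sums = nd.sum(holes, lab_holes, range(1, n_holes + 1))
    for lab, size in enumerate(np.atleast_1d(sums), start=1):
        if size / float(reg_area) < rel_size:
            mask[lab_holes == lab] = 1
    return mask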
|
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constructs the RPF bundle."""
__author__ = '[email protected] (Jason Stredwick)'
import os
import bundle
import deps as DEPS
import paths as PATHS
SRC = bundle.SRC
DST = bundle.DST
TREE = bundle.TREE
GENFILES_ROOT = os.path.join(PATHS.GENFILES_ROOT, 'rpf')
class RPF(bundle.Bundle):
def __init__(self, deps, debug=True, deps_root='', src_root='', dst_root=''):
bundle.Bundle.__init__(self, deps, debug, deps_root, src_root, dst_root)
def CreateJsTargets(self, src_location, dst_location):
dst_root = os.path.join(dst_location, GENFILES_ROOT)
return {
'background': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'base',
'background.js'),
DST: os.path.join(dst_root, 'background_script.js')
},
'getactioninfo': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'libs',
'getactioninfo.js'),
DST: os.path.join(dst_root, 'getactioninfo_script.js')
},
'console': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'libs',
'console.js'),
DST: os.path.join(dst_root, 'console_script.js')
},
'elementhelper': {
SRC: os.path.join(src_location, 'common', 'extension', 'dom',
'elementhelper.js'),
DST: os.path.join(dst_root, 'elementhelper_script.js')
},
'popup': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'base',
'popup.js'),
DST: os.path.join(dst_root, 'popup_script.js')
},
## custom build to extract the framework
'rpf_content': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'libs',
'getactioninfo.js'),
DST: os.path.join(dst_root, 'rpf_content.js')
},
'rpf_background': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'src', 'base',
'rpf_utils.js'),
DST: os.path.join(dst_root, 'rpf_background.js')
}
}
def CreateSoyTargets(self, src_location, dst_location):
dst_root = os.path.join(dst_location, GENFILES_ROOT)
targets = {
'common_ux': {
SRC: os.path.join(src_location, 'common', 'extension', 'ux',
'common_ux.soy'),
DST: os.path.join(dst_root, 'common_ux.soy.js')
}
}
names = ['popup', 'rpfconsole', 'rpf_dialogs', 'locatorsupdater']
src_root = os.path.join(src_location, PATHS.RPF_ROOT, 'templates')
bundle.AddTargets(names, targets, src_root, dst_root, '.soy', '.soy.js')
return targets
def CreateCopyTargets(self, deps, deps_location, src_location, dst_location):
dst_root = os.path.join(dst_location, PATHS.RPF_DST)
targets = {
'manifest': {
SRC: os.path.join(src_location, PATHS.RPF_ROOT, 'manifest.json'),
DST: os.path.join(dst_root, 'manifest.json'),
TREE: False
},
'images': {
SRC: os.path.join(src_location, 'extension', 'imgs'),
DST: os.path.join(dst_location, PATHS.RPF_IMGS_DST),
TREE: True
},
'analytics': {
SRC: os.path.join(src_location, 'common', 'extension', 'analytics',
'analytics.js'),
DST: os.path.join(dst_root, 'analytics.js'),
TREE: False
},
# 'ace': {
# SRC: os.path.join(deps_location, deps[DEPS.ACE][DEPS.ROOT], 'build',
# 'src'),
# DST: os.path.join(dst_root, 'ace'),
# TREE: True
# }
## map directory to this
'ace': {
SRC: os.path.join(deps_location, deps[DEPS.ACE][DEPS.ROOT], 'lib', 'ace'),
DST: os.path.join(dst_root, 'ace'),
TREE: True
}
}
# Styles
names = ['consoles', 'options', 'popup', 'rpf_console']
src_root = os.path.join(src_location, 'extension', 'styles')
dst_root = os.path.join(dst_location, PATHS.RPF_STYLES_DST)
bundle.AddTargets(names, targets, src_root, dst_root, '.css', '.css',
'_css')
names = ['recordmodemanager']
src_root = os.path.join(src_location, PATHS.RPF_ROOT, 'styles')
bundle.AddTargets(names, targets, src_root, dst_root, '.css', '.css',
'_css')
# HTML
names = ['background', 'popup']
src_root = os.path.join(src_location, 'extension', 'html')
dst_root = os.path.join(dst_location, PATHS.RPF_DST)
bundle.AddTargets(names, targets, src_root, dst_root, '.html', '.html',
'_html')
names = ['options']
src_root = os.path.join(src_location, 'extension', 'src', 'options')
bundle.AddTargets(names, targets, src_root, dst_root, '.html', '.html',
'_html')
names = ['console']
src_root = os.path.join(src_location, PATHS.RPF_ROOT, 'html')
bundle.AddTargets(names, targets, src_root, dst_root, '.html', '.html',
'_html')
# Add in known compiled JavaScript files.
js_targets = self.CreateJsTargets(src_location=src_location,
dst_location=dst_location)
for target_name in js_targets:
name = '%s_script' % target_name
filename = '%s.js' % name
dst = os.path.join(dst_root, filename)
targets[name] = {
SRC: js_targets[target_name][DST],
DST: dst,
TREE: False
}
return targets
def CreateClosureCompilerControls(self, deps, src_location, deps_location):
return [
'--root=%s' % os.path.join(src_location, 'common', 'extension'),
'--root=%s' % os.path.join(src_location, PATHS.RPF_ROOT, 'src'),
'--root=%s' % os.path.join(deps_location,
deps[DEPS.CLOSURE_LIB][DEPS.ROOT]),
'--root=%s' % os.path.join(deps_location, DEPS.GetSoyLibraryPath(deps)),
'--root=%s' % os.path.join(deps_location, PATHS.GENFILES_ROOT,
'rpf'),
'--root=%s' % os.path.join(deps_location, deps[DEPS.ATOMS][DEPS.ROOT]),
# add this dep
'--root=%s' % os.path.join(deps_location, deps[DEPS.WGXPATH][DEPS.ROOT]),
'--compiler_flags=--externs=%s' % os.path.join(src_location,
'common', 'extension', 'externs', 'closure.js'),
'--compiler_flags=--externs=%s' % os.path.join(src_location,
'common', 'extension', 'externs', 'chrome_extensions.js'),
'--compiler_flags=--externs=%s' % os.path.join(src_location,
'common', 'extension', 'externs', 'rpf_externs.js'),
'--compiler_flags=--externs=%s' % os.path.join(src_location,
'common', 'extension', 'externs', 'ace_externs.js')
]
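# --- Illustrative sketch (not part of the original build script) ------------
# Every Create*Targets method above returns a mapping of target name to a
# small dict keyed by the SRC/DST (and, for copy targets, TREE) aliases
# defined at the top of this file. A toy example of that shape; the target
# names and paths here are hypothetical.
def _demo_target_shape():
    return {
        'example_script': {
            SRC: os.path.join('src', 'base', 'example.js'),
            DST: os.path.join(GENFILES_ROOT, 'example_script.js')
        },
        'example_copy': {
            SRC: os.path.join('extension', 'imgs'),
            DST: os.path.join('output', 'imgs'),
            TREE: True  # copy the whole directory tree
        }
    }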
|
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import pkg_resources
import sys
if sys.version_info < (2, 4):
from paste.script.util import string24 as string
else:
import string
import cgi
import urllib
import re
Cheetah = None
try:
import subprocess
except ImportError:
try:
from paste.script.util import subprocess24 as subprocess
except ImportError:
subprocess = None # jython
import inspect
class SkipTemplate(Exception):
"""
Raised to indicate that the template should not be copied over.
Raise this exception during the substitution of your template
"""
def copy_dir(source, dest, vars, verbosity, simulate, indent=0,
use_cheetah=False, sub_vars=True, interactive=False,
svn_add=True, overwrite=True, template_renderer=None):
"""
Copies the ``source`` directory to the ``dest`` directory.
``vars``: A dictionary of variables to use in any substitutions.
``verbosity``: Higher numbers will show more about what is happening.
``simulate``: If true, then don't actually *do* anything.
``indent``: Indent any messages by this amount.
``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+``
in filenames will be substituted.
``use_cheetah``: If true, then any templates encountered will be
substituted with Cheetah. Otherwise ``template_renderer`` or
``string.Template`` will be used for templates.
``svn_add``: If true, any files written out in directories with
``.svn/`` directories will be added (via ``svn add``).
    ``overwrite``: If false, then don't ever overwrite anything.
``interactive``: If you are overwriting a file and interactive is
true, then ask before overwriting.
``template_renderer``: This is a function for rendering templates
(if you don't want to use Cheetah or string.Template). It should
have the signature ``template_renderer(content_as_string,
vars_as_dict, filename=filename)``.
"""
# This allows you to use a leading +dot+ in filenames which would
# otherwise be skipped because leading dots make the file hidden:
vars.setdefault('dot', '.')
vars.setdefault('plus', '+')
use_pkg_resources = isinstance(source, tuple)
if use_pkg_resources:
names = pkg_resources.resource_listdir(source[0], source[1])
else:
names = os.listdir(source)
names.sort()
pad = ' '*(indent*2)
if not os.path.exists(dest):
if verbosity >= 1:
print '%sCreating %s/' % (pad, dest)
if not simulate:
svn_makedirs(dest, svn_add=svn_add, verbosity=verbosity,
pad=pad)
elif verbosity >= 2:
print '%sDirectory %s exists' % (pad, dest)
for name in names:
if use_pkg_resources:
full = '/'.join([source[1], name])
else:
full = os.path.join(source, name)
reason = should_skip_file(name)
if reason:
if verbosity >= 2:
reason = pad + reason % {'filename': full}
print reason
continue
        if sub_vars:
            dest_full = os.path.join(dest, substitute_filename(name, vars))
        else:
            dest_full = os.path.join(dest, name)
sub_file = False
if dest_full.endswith('_tmpl'):
dest_full = dest_full[:-5]
sub_file = sub_vars
if use_pkg_resources and pkg_resources.resource_isdir(source[0], full):
if verbosity:
print '%sRecursing into %s' % (pad, os.path.basename(full))
copy_dir((source[0], full), dest_full, vars, verbosity, simulate,
indent=indent+1, use_cheetah=use_cheetah,
sub_vars=sub_vars, interactive=interactive,
svn_add=svn_add, template_renderer=template_renderer)
continue
elif not use_pkg_resources and os.path.isdir(full):
if verbosity:
print '%sRecursing into %s' % (pad, os.path.basename(full))
copy_dir(full, dest_full, vars, verbosity, simulate,
indent=indent+1, use_cheetah=use_cheetah,
sub_vars=sub_vars, interactive=interactive,
svn_add=svn_add, template_renderer=template_renderer)
continue
elif use_pkg_resources:
content = pkg_resources.resource_string(source[0], full)
else:
f = open(full, 'rb')
content = f.read()
f.close()
if sub_file:
try:
content = substitute_content(content, vars, filename=full,
use_cheetah=use_cheetah,
template_renderer=template_renderer)
except SkipTemplate:
continue
if content is None:
continue
already_exists = os.path.exists(dest_full)
if already_exists:
f = open(dest_full, 'rb')
old_content = f.read()
f.close()
if old_content == content:
if verbosity:
print '%s%s already exists (same content)' % (pad, dest_full)
continue
if interactive:
if not query_interactive(
full, dest_full, content, old_content,
simulate=simulate):
continue
elif not overwrite:
continue
if verbosity and use_pkg_resources:
print '%sCopying %s to %s' % (pad, full, dest_full)
elif verbosity:
print '%sCopying %s to %s' % (pad, os.path.basename(full), dest_full)
if not simulate:
f = open(dest_full, 'wb')
f.write(content)
f.close()
if svn_add and not already_exists:
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(dest_full)), '.svn')):
if verbosity > 1:
print '%s.svn/ does not exist; cannot add file' % pad
else:
cmd = ['svn', 'add', dest_full]
if verbosity > 1:
print '%sRunning: %s' % (pad, ' '.join(cmd))
if not simulate:
# @@: Should
if subprocess is None:
raise RuntimeError('copydir failed, environment '
'does not support subprocess '
'module')
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
if verbosity > 1 and stdout:
print 'Script output:'
print stdout
elif svn_add and already_exists and verbosity > 1:
print '%sFile already exists (not doing svn add)' % pad
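# --- Illustrative sketch (not part of the original module) ------------------
# How the filename conventions described in copy_dir play out: ``+var+``
# segments are replaced via substitute_filename() and a trailing ``_tmpl``
# marks the file content for substitution. The names below are hypothetical.
def _demo_filename_substitution():
    vars = {'package': 'mypkg', 'dot': '.', 'plus': '+'}
    name = '+package+'
    assert substitute_filename(name, vars) == 'mypkg'
    dest_full = 'mypkg/setup.py_tmpl'
    if dest_full.endswith('_tmpl'):
        dest_full = dest_full[:-5]           # content will be substituted
    return dest_full                         # 'mypkg/setup.py'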
def should_skip_file(name):
"""
Checks if a file should be skipped based on its name.
If it should be skipped, returns the reason, otherwise returns
None.
"""
if name.startswith('.'):
return 'Skipping hidden file %(filename)s'
if name.endswith('~') or name.endswith('.bak'):
return 'Skipping backup file %(filename)s'
if name.endswith('.pyc') or name.endswith('.pyo'):
return 'Skipping %s file %%(filename)s' % os.path.splitext(name)[1]
if name.endswith('$py.class'):
return 'Skipping $py.class file %(filename)s'
if name in ('CVS', '_darcs'):
return 'Skipping version control directory %(filename)s'
return None
# Overridden on user's request:
all_answer = None
def query_interactive(src_fn, dest_fn, src_content, dest_content,
simulate):
global all_answer
from difflib import unified_diff, context_diff
u_diff = list(unified_diff(
dest_content.splitlines(),
src_content.splitlines(),
dest_fn, src_fn))
c_diff = list(context_diff(
dest_content.splitlines(),
src_content.splitlines(),
dest_fn, src_fn))
added = len([l for l in u_diff if l.startswith('+')
and not l.startswith('+++')])
removed = len([l for l in u_diff if l.startswith('-')
and not l.startswith('---')])
if added > removed:
msg = '; %i lines added' % (added-removed)
elif removed > added:
msg = '; %i lines removed' % (removed-added)
else:
msg = ''
print 'Replace %i bytes with %i bytes (%i/%i lines changed%s)' % (
len(dest_content), len(src_content),
removed, len(dest_content.splitlines()), msg)
prompt = 'Overwrite %s [y/n/d/B/?] ' % dest_fn
while 1:
if all_answer is None:
response = raw_input(prompt).strip().lower()
else:
response = all_answer
if not response or response[0] == 'b':
import shutil
new_dest_fn = dest_fn + '.bak'
n = 0
while os.path.exists(new_dest_fn):
n += 1
new_dest_fn = dest_fn + '.bak' + str(n)
print 'Backing up %s to %s' % (dest_fn, new_dest_fn)
if not simulate:
shutil.copyfile(dest_fn, new_dest_fn)
return True
elif response.startswith('all '):
rest = response[4:].strip()
if not rest or rest[0] not in ('y', 'n', 'b'):
print query_usage
continue
response = all_answer = rest[0]
if response[0] == 'y':
return True
elif response[0] == 'n':
return False
elif response == 'dc':
print '\n'.join(c_diff)
elif response[0] == 'd':
print '\n'.join(u_diff)
else:
print query_usage
query_usage = """\
Responses:
Y(es): Overwrite the file with the new content.
N(o): Do not overwrite the file.
D(iff): Show a unified diff of the proposed changes (dc=context diff)
B(ackup): Save the current file contents to a .bak file
(and overwrite)
Type "all Y/N/B" to use Y/N/B for answer to all future questions
"""
def svn_makedirs(dir, svn_add, verbosity, pad):
parent = os.path.dirname(os.path.abspath(dir))
if not os.path.exists(parent):
svn_makedirs(parent, svn_add, verbosity, pad)
os.mkdir(dir)
if not svn_add:
return
if not os.path.exists(os.path.join(parent, '.svn')):
if verbosity > 1:
print '%s.svn/ does not exist; cannot add directory' % pad
return
cmd = ['svn', 'add', dir]
if verbosity > 1:
print '%sRunning: %s' % (pad, ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
if verbosity > 1 and stdout:
print 'Script output:'
print stdout
def substitute_filename(fn, vars):
for var, value in vars.items():
fn = fn.replace('+%s+' % var, str(value))
return fn
def substitute_content(content, vars, filename='<string>',
use_cheetah=False, template_renderer=None):
global Cheetah
v = standard_vars.copy()
v.update(vars)
vars = v
if template_renderer is not None:
return template_renderer(content, vars, filename=filename)
if not use_cheetah:
tmpl = LaxTemplate(content)
try:
return tmpl.substitute(TypeMapper(v))
except Exception, e:
_add_except(e, ' in file %s' % filename)
raise
if Cheetah is None:
import Cheetah.Template
tmpl = Cheetah.Template.Template(source=content,
searchList=[vars])
return careful_sub(tmpl, vars, filename)
def careful_sub(cheetah_template, vars, filename):
"""
Substitutes the template with the variables, using the
.body() method if it exists. It assumes that the variables
were also passed in via the searchList.
"""
if not hasattr(cheetah_template, 'body'):
return sub_catcher(filename, vars, str, cheetah_template)
body = cheetah_template.body
args, varargs, varkw, defaults = inspect.getargspec(body)
call_vars = {}
for arg in args:
if arg in vars:
call_vars[arg] = vars[arg]
return sub_catcher(filename, vars, body, **call_vars)
def sub_catcher(filename, vars, func, *args, **kw):
"""
Run a substitution, returning the value. If an error occurs, show
the filename. If the error is a NameError, show the variables.
"""
try:
return func(*args, **kw)
except SkipTemplate, e:
print 'Skipping file %s' % filename
if str(e):
print str(e)
raise
except Exception, e:
print 'Error in file %s:' % filename
if isinstance(e, NameError):
items = vars.items()
items.sort()
for name, value in items:
print '%s = %r' % (name, value)
raise
def html_quote(s):
if s is None:
return ''
return cgi.escape(str(s), 1)
def url_quote(s):
if s is None:
return ''
return urllib.quote(str(s))
def test(conf, true_cond, false_cond=None):
if conf:
return true_cond
else:
return false_cond
def skip_template(condition=True, *args):
"""
Raise SkipTemplate, which causes copydir to skip the template
being processed. If you pass in a condition, only raise if that
condition is true (allows you to use this with string.Template)
If you pass any additional arguments, they will be used to
instantiate SkipTemplate (generally use like
``skip_template(license=='GPL', 'Skipping file; not using GPL')``)
"""
if condition:
raise SkipTemplate(*args)
def _add_except(exc, info):
if not hasattr(exc, 'args') or exc.args is None:
return
args = list(exc.args)
if args:
args[0] += ' ' + info
else:
args = [info]
exc.args = tuple(args)
return
standard_vars = {
'nothing': None,
'html_quote': html_quote,
'url_quote': url_quote,
'empty': '""',
'test': test,
'repr': repr,
'str': str,
'bool': bool,
'SkipTemplate': SkipTemplate,
'skip_template': skip_template,
}
class TypeMapper(dict):
def __getitem__(self, item):
options = item.split('|')
for op in options[:-1]:
try:
value = eval_with_catch(op, dict(self.items()))
break
except (NameError, KeyError):
pass
else:
value = eval(options[-1], dict(self.items()))
if value is None:
return ''
else:
return str(value)
def eval_with_catch(expr, vars):
try:
return eval(expr, vars)
except Exception, e:
_add_except(e, 'in expression %r' % expr)
raise
class LaxTemplate(string.Template):
# This change of pattern allows for anything in braces, but
# only identifiers outside of braces:
pattern = r"""
\$(?:
(?P<escaped>\$) | # Escape sequence of two delimiters
(?P<named>[_a-z][_a-z0-9]*) | # delimiter and a Python identifier
{(?P<braced>.*?)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
|
|
### IMPORTS ###
import OAuth2Util
import praw
import re
import sys
import threading
import time
import traceback
from wordnik import *
from datetime import datetime
### CLASS ###
class Define_It:
def __init__(self, reddit, footer='', sidebar='', username='', subreddit='', api_file='define_it.conf'):
self._r = reddit
self._o = OAuth2Util.OAuth2Util(self._r)
self._done = DoneList()
self._avoid = AvoidList()
self.message_footer = footer
self.sidebar = sidebar
self.match_pattern = re.compile(r'(?:\n|^)define(?:: ?| )(-ignore|(?:["*\']+)?([^\n,.!?#&_:;"*\\(){}<>[\]]+))(, ?((pro)?noun|(ad)?verb(-(in)?transitive)?|adjective|(abbrevia|preposi|conjunc|interjec)tion))?')
self._api_file = api_file
self._load_dictionary()
self._create_api()
self.username = username
self.subreddit = subreddit
# ### WORDNIK ### #
def _load_dictionary(self):
try:
with open(self._api_file, 'r') as f:
lines = [x.strip() for x in f.readlines()]
self._api_url,self._api_key = lines
except OSError:
print('Could not find config file.')
def _create_api(self):
self._client = swagger.ApiClient(self._api_key,self._api_url)
self._wordApi = WordApi.WordApi(self._client)
# ### REDDIT ### #
def search(self, body):
found = self.match_pattern.search(body)
return found is not None
def _strip_unwanted(self, word):
if isinstance(word, str):
try:
if (word[0] == word[-1] and
word[0] in '"*\''):
word = word[1:-1]
if ' - ' in word:
word = word.split('-')[0].strip()
return word
except IndexError as e:
Error(e, tb=traceback)
def _make(self, body):
pat = re.compile(r' ?(because|but|please).*',re.IGNORECASE)
found = self.match_pattern.search(body)
if found is None:
return
        if found.group(3) is not None:
return re.sub('["*]+','',body[found.start():found.end()].lstrip()[7:].strip()).split(',')
body = re.sub('["*]+','',body[found.start():found.end()].lstrip()[7:].strip())
if len(body.split(' ')) > 1:
return pat.sub('', self._strip_unwanted(body))
return self._strip_unwanted(body)
def ignore(self, comment):
self._avoid.add(comment.author.name)
comment.reply('This message confirms that you have been added to the ignore list.' + self.message_footer)
def delete(self, comment):
if comment.is_root:
return
parent_id = comment.parent_id
parent = self._r.get_info(thing_id=parent_id)
if parent.author.name != self.username:
return
request_id = parent.parent_id
request = self._r.get_info(thing_id=request_id)
if comment.author.name != request.author.name:
return
parent.delete()
        print('%s requested that their comment be deleted' % comment.author.name)
def _begin(self, comment):
id = comment.id
already_done = self._done.get()
avoid = self._avoid.get()
if id not in already_done:
self._done.add('%s\n'%id)
author = comment.author.name
body = re.sub(r'/u/%s'%self.username,'define',comment.body,flags=re.IGNORECASE)
formatted = self._make(body)
            if formatted is not None and author not in avoid:
if isinstance(formatted, list):
word = formatted[0]
if word == '-ignore':
self.ignore(comment)
return
elif word == '-delete':
self.delete(comment)
return
part = formatted[1]
else:
if formatted == '-ignore' and author not in avoid:
self.ignore(comment)
return
elif formatted == '-delete':
self.delete(comment)
return
word = formatted
part = ''
self._create_api()
partText = part if part == '' else (' as a ' + part)
definitions = Definition(self._wordApi, word=word, part=part)
formatted = definitions.format()
if len(definitions.definitions) > 0:
print('%s requested "%s"%s'%(author,word,partText))
comment.reply(formatted + self.message_footer)
try:
if self.sidebar != '':
self._r.get_subreddit(self.subreddit).update_settings(description=self.sidebar.format(requester=author,definitions=formatted))
except Exception as e:
Error(e, tb=traceback)
def run(self):
self._o.refresh()
while True:
try:
for x in praw.helpers.comment_stream(self._r, 'all'):
self._o.refresh()
if not self.search(x.body): continue
                    # Pass the callable and its argument separately so that
                    # the work actually runs in the new thread.
                    t2 = threading.Thread(target=self._begin, args=(x,))
t2.start()
time.sleep(5)
try:
zzz = next(self._r.get_unread())
messages = True
except StopIteration:
messages = False
if messages:
for y in self._r.get_unread():
try:
y.mark_as_read()
if y.subject == 'comment reply' or (y.subject == 'username mention' and y.was_comment):
                                    t3 = threading.Thread(target=self._begin, args=(y,))
t3.start()
except AttributeError:
pass
except praw.errors.Forbidden:
pass
except KeyboardInterrupt:
print('Exiting...')
sys.exit(-1)
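# --- Illustrative sketch (not part of the original module) ------------------
# What the match_pattern built in Define_It.__init__ is meant to capture: a
# line starting with "define", then a word (optionally quoted) and an optional
# part of speech. The regex below is copied verbatim from __init__ so it can
# be exercised without constructing the bot; the sample text is hypothetical.
def _demo_match_pattern():
    pattern = re.compile(r'(?:\n|^)define(?:: ?| )(-ignore|(?:["*\']+)?([^\n,.!?#&_:;"*\\(){}<>[\]]+))(, ?((pro)?noun|(ad)?verb(-(in)?transitive)?|adjective|(abbrevia|preposi|conjunc|interjec)tion))?')
    m = pattern.search('define serendipity, noun')
    return m.group(2), m.group(3)            # ('serendipity', ', noun')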
class Definition:
def __init__(self, api, **kwargs):
self._api = api
if 'word' in kwargs and 'part' in kwargs:
self.word = kwargs['word']
self.definitions = self.define(kwargs['word'],kwargs['part'])
def define(self, word, part):
f = self._api.getDefinitions
definitions = []
for i in range(3):
try:
d = f(word, partOfSpeech=part, sourceDictionaries='all')
if d is None:
d = f(word.lower(), partOfSpeech=part, sourceDictionaries='all')
if d is not None:
definitions.append((d[i].word,d[i].partOfSpeech,d[i].text))
continue
d = f(word.upper(), partOfSpeech=part, sourceDictionaries='all')
if d is not None:
definitions.append((d[i].word,d[i].partOfSpeech,d[i].text))
continue
d = f(word.capitalize(), partOfSpeech=part, sourceDictionaries='all')
if d is not None:
definitions.append((d[i].word,d[i].partOfSpeech,d[i].text))
continue
break
definitions.append((d[i].word,d[i].partOfSpeech,d[i].text))
except IndexError as e:
Error(e,tb=traceback)
break
except Exception:
break
return definitions
def format(self):
s = ''
if len(self.definitions) >= 1:
for definition in self.definitions:
word = definition[0]
if definition[1] != 'abbreviation':
word = ' '.join([x.capitalize() for x in word.split(' ')])
s += '%s (%s): %s\n\n' % (word, definition[1], definition[2])
return s
class DoneList:
def __init__(self):
self.list = self.get()
def add(self,content,a=True):
if a:
self.read()
with open('done.txt', 'a') as f:
f.write(content)
def read(self):
with open('done.txt') as f:
for i,l in enumerate(f):
pass
if (i+1) >= 200000:
t = self._tail(open('done.txt'), 50000)
open('done.txt', 'w').close()
for x in t[0]:
self.add('%s\n'%x,False)
def _tail(self, f, n, offset=None):
avg_line_length = 7
to_read = n + (offset or 0)
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
f.seek(0)
pos = f.tell()
lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0:
f.close()
return lines[-to_read:offset and -offset or None], \
len(lines) > to_read or pos > 0
avg_line_length *= 1.3
def get(self):
with open('done.txt') as f:
return [x.strip() for x in f.readlines()]
class AvoidList:
def __init__(self):
self.list = self.get()
def add(self, name):
with open('avoid.txt', 'a') as f:
f.write('%s\n'%name)
def get(self):
with open('avoid.txt') as f:
return [x.strip() for x in f.readlines()]
class Error:
def __init__(self, error, message=None, tb=None):
if message is not None:
print(str(type(error)) + ' ' + message)
else:
print(str(type(error)))
if tb is not None:
d = datetime.now()
name = 'errors\\error{0}.txt'.format(d.strftime('%Y%m%d%H%M%S'))
f = open(name, 'w')
tb.print_exc(file=f)
f.close()
|
|
'''
Created on May 14, 2020
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
# Variable name for tracking people
AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME = "amplitude_user"
# HTTP timeout
AMPLITUDE_HTTP_TIMEOUT_S = 2
class LocationAmplitudeMicroservice(Intelligence):
def __init__(self, botengine, parent):
"""
Instantiate this object
:param parent: Parent object, either a location or a device object.
"""
Intelligence.__init__(self, botengine, parent)
self.analytics_track(botengine, {'event_name': 'reset', 'properties': None})
def analytics_track(self, botengine, content):
"""
Track an event.
This will buffer your events and flush them to the server altogether at the end of all bot executions,
and before variables get saved.
        :param botengine: BotEngine environment
        :param content: Dictionary with an 'event_name' (string) describing the event and
            'properties' (dict) of additional data to record; keys should be strings and
            values should be strings, numbers, or booleans
"""
if botengine.is_test_location():
return
event_name = content['event_name']
properties = content['properties']
botengine.get_logger().info("Analytics: Tracking {}".format(event_name))
if properties is None:
properties = {}
properties["locationId"] = botengine.get_location_id()
properties["organizationId"] = botengine.get_organization_id()
self._flush(botengine,
[
{
"user_id": self._get_user_id(botengine),
"device_id": self._get_device_id(botengine),
"time": botengine.get_timestamp(),
"event_type": event_name,
"event_properties": properties,
"user_properties": {
"locationId": botengine.get_location_id(),
"organizationId": botengine.get_organization_id()
}
}
])
def analytics_people_set(self, botengine, content):
"""
Set some key/value attributes for this user
:param botengine: BotEngine environment
        :param content: Dictionary with 'properties_dict', a key/value dictionary of attributes to track
"""
if botengine.is_test_location():
return
properties_dict = content['properties_dict']
botengine.get_logger().info("analytics.py: Setting user info - {}".format(properties_dict))
focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
if focused_properties is None:
focused_properties = properties_dict
focused_properties.update(properties_dict)
focused_properties["locationId"] = botengine.get_location_id()
focused_properties["organizationId"] = botengine.get_organization_id()
botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
self._flush(botengine,
[
{
"user_id": self._get_user_id(botengine),
"device_id": self._get_device_id(botengine),
"time": botengine.get_timestamp(),
"user_properties": focused_properties
}
])
def analytics_people_increment(self, botengine, content):
"""
Adds numerical values to properties of a people record. Nonexistent properties on the record default to zero. Negative values in properties will decrement the given property.
:param botengine: BotEngine environment
        :param content: Dictionary with 'properties_dict', a key/value dictionary where each value is
            numeric (positive or negative); nonexistent properties default to 0 and are incremented or
            decremented by that amount.
"""
if botengine.is_test_location():
return
properties_dict = content['properties_dict']
botengine.get_logger().info("Analytics: Incrementing user info - {}".format(properties_dict))
focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
if focused_properties is None:
focused_properties = properties_dict
for p in properties_dict:
if p not in focused_properties:
focused_properties[p] = 0
focused_properties[p] += properties_dict[p]
focused_properties["locationId"] = botengine.get_location_id()
focused_properties["organizationId"] = botengine.get_organization_id()
botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
self._flush(botengine,
[
{
"user_id": self._get_user_id(botengine),
"device_id": self._get_device_id(botengine),
"time": botengine.get_timestamp(),
"user_properties": focused_properties
}
])
def analytics_people_unset(self, botengine, content):
"""
Delete a property from a user
:param botengine: BotEngine
        :param content: Dictionary with 'properties_list', a list of property names to remove from the people record.
"""
if botengine.is_test_location():
return
properties_list = content['properties_list']
botengine.get_logger().info("Analytics: Removing user info - {}".format(properties_list))
focused_properties = botengine.load_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME)
if focused_properties is None:
# Nothing to unset
return
for p in properties_list:
if p in focused_properties:
del focused_properties[p]
focused_properties["locationId"] = botengine.get_location_id()
focused_properties["organizationId"] = botengine.get_organization_id()
botengine.save_variable(AMPLITUDE_USER_PROPERTIES_VARIABLE_NAME, focused_properties, required_for_each_execution=False)
self._flush(botengine,
[
{
"user_id": self._get_user_id(botengine),
"device_id": self._get_device_id(botengine),
"time": botengine.get_timestamp(),
"user_properties": focused_properties
}
])
def _flush(self, botengine, data):
"""
Required. Implement the mechanisms to flush your analytics.
:param botengine: BotEngine
"""
if botengine.is_test_location():
botengine.get_logger().info("Analytics: This test location will not record analytics.")
return
import domain
import json
import requests
import bundle
token = None
for cloud_address in domain.AMPLITUDE_TOKENS:
if cloud_address in bundle.CLOUD_ADDRESS:
token = domain.AMPLITUDE_TOKENS[cloud_address]
if token is None:
# Nothing to do
botengine.get_logger().info("analytics_amplitude.flush(): No analytics token for {}".format(bundle.CLOUD_ADDRESS))
return
if token == "":
# Nothing to do
botengine.get_logger().info("analytics_amplitude.flush(): No analytics token for {}".format(bundle.CLOUD_ADDRESS))
return
http_headers = {"Content-Type": "application/json"}
body = {
"api_key": token,
"events": data
}
url = "https://api.amplitude.com/2/httpapi"
try:
requests.post(url, headers=http_headers, data=json.dumps(body), timeout=AMPLITUDE_HTTP_TIMEOUT_S)
botengine.get_logger().info("location_amplitude_microservice: Flushed()")
        # requests is imported locally above; self has no 'requests' attribute,
        # and logging goes through botengine.
        except requests.HTTPError:
            botengine.get_logger().info("Generic HTTP error calling POST " + url)
        except requests.ConnectionError:
            botengine.get_logger().info("Connection HTTP error calling POST " + url)
        except requests.Timeout:
            botengine.get_logger().info(str(AMPLITUDE_HTTP_TIMEOUT_S) + " second HTTP Timeout calling POST " + url)
        except requests.TooManyRedirects:
            botengine.get_logger().info("Too many redirects HTTP error calling POST " + url)
except Exception as e:
return
def _get_user_id(self, botengine):
"""
Generate an Amplitude User ID
To us, this user ID will always have a "bot_" prefix, followed by the bot instance ID.
:return:
"""
return "bot_{}".format(botengine.bot_instance_id)
def _get_device_id(self, botengine):
"""
Get the Device ID
:param botengine:
:return:
"""
return botengine.get_bundle_id()
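# --- Illustrative sketch (not part of the original module) ------------------
# The methods above all take a single ``content`` dict. A hypothetical caller
# holding a reference to this microservice would invoke them like this; the
# event name and properties are made up for illustration.
def _demo_track_call(botengine, amplitude_microservice):
    amplitude_microservice.analytics_track(botengine, {
        'event_name': 'door_opened',
        'properties': {'device_type': 'entry_sensor', 'count': 1}
    })
    amplitude_microservice.analytics_people_set(botengine, {
        'properties_dict': {'timezone': 'US/Pacific'}
    })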
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
subscription_id: str,
*,
expand: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"vmssExtensionName": _SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
resource_group_name: str,
vm_scale_set_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"vmScaleSetName": _SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
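# --- Illustrative sketch (not part of the generated client) -----------------
# The build_*_request helpers above only assemble an HttpRequest; they do not
# send it. A quick demonstration with placeholder names:
def _demo_build_get_request():
    request = build_get_request(
        resource_group_name="my-rg",                        # placeholder
        vm_scale_set_name="my-vmss",                        # placeholder
        vmss_extension_name="my-extension",                 # placeholder
        subscription_id="00000000-0000-0000-0000-000000000000",
        expand=None,
    )
    return request.method, request.url                      # ("GET", formatted URL)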
class VirtualMachineScaleSetExtensionsOperations(object):
"""VirtualMachineScaleSetExtensionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
extension_parameters: "_models.VirtualMachineScaleSetExtension",
**kwargs: Any
) -> "_models.VirtualMachineScaleSetExtension":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(extension_parameters, 'VirtualMachineScaleSetExtension')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
extension_parameters: "_models.VirtualMachineScaleSetExtension",
**kwargs: Any
) -> LROPoller["_models.VirtualMachineScaleSetExtension"]:
"""The operation to create or update an extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param vm_scale_set_name: The name of the VM scale set where the extension should be created
         or updated.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param extension_parameters: Parameters supplied to the Create VM scale set Extension
operation.
:type extension_parameters:
~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtension
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False to
         disable polling for this operation, or pass in your own initialized polling object to use a
         custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtension]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
extension_parameters=extension_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
extension_parameters: "_models.VirtualMachineScaleSetExtensionUpdate",
**kwargs: Any
) -> "_models.VirtualMachineScaleSetExtension":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(extension_parameters, 'VirtualMachineScaleSetExtensionUpdate')
request = build_update_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
extension_parameters: "_models.VirtualMachineScaleSetExtensionUpdate",
**kwargs: Any
) -> LROPoller["_models.VirtualMachineScaleSetExtension"]:
"""The operation to update an extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the extension should be updated.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param extension_parameters: Parameters supplied to the Update VM scale set Extension
operation.
:type extension_parameters:
~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtensionUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False to
         disable polling for this operation, or pass in your own initialized polling object to use a
         custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtension]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
extension_parameters=extension_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the extension should be deleted.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False to
         disable polling for this operation, or pass in your own initialized polling object to use a
         custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualMachineScaleSetExtension":
"""The operation to get the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the extension.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetExtension, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtension
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> Iterable["_models.VirtualMachineScaleSetExtensionListResult"]:
"""Gets a list of all extensions in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the extension.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineScaleSetExtensionListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineScaleSetExtensionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtensionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineScaleSetExtensionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions'} # type: ignore
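# Illustrative usage sketch (not part of the generated operations class): one way this
# operations group is normally reached, via ComputeManagementClient, per the class
# docstring's note that it should not be instantiated directly. The credential helper
# and the placeholder subscription/resource names below are assumptions.
def _example_vmss_extension_usage():  # pragma: no cover - documentation only
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient
    client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    ops = client.virtual_machine_scale_set_extensions
    # begin_* methods return an LROPoller; result() blocks until the long-running
    # operation finishes (ARMPolling drives the polling by default).
    ops.begin_delete("<resource-group>", "<vmss-name>", "<extension-name>").result()
    # list() returns an ItemPaged iterator of VirtualMachineScaleSetExtension objects.
    return [ext.name for ext in ops.list("<resource-group>", "<vmss-name>")]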
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from blinkpy.common.host_mock import MockHost
from blinkpy.common.net.git_cl import TryJobStatus
from blinkpy.common.net.git_cl_mock import MockGitCL
from blinkpy.common.net.results_fetcher import Build
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.w3c.wpt_manifest import BASE_MANIFEST_NAME
from blinkpy.web_tests.builder_list import BuilderList
from blinkpy.web_tests.port.factory_mock import MockPortFactory
from blinkpy.web_tests.port.android import (
PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_DISABLED_TESTS,
ANDROID_WEBLAYER, ANDROID_WEBVIEW, CHROME_ANDROID,
PRODUCTS_TO_STEPNAMES)
from blinkpy.w3c.android_wpt_expectations_updater import (
AndroidWPTExpectationsUpdater)
WEBLAYER_WPT_STEP = PRODUCTS_TO_STEPNAMES[ANDROID_WEBLAYER]
WEBVIEW_WPT_STEP = PRODUCTS_TO_STEPNAMES[ANDROID_WEBVIEW]
CHROME_ANDROID_WPT_STEP = PRODUCTS_TO_STEPNAMES[CHROME_ANDROID]
class AndroidWPTExpectationsUpdaterTest(LoggingTestCase):
_raw_baseline_expectations = (
'# results: [ Failure Crash Timeout]\n'
'\n'
'crbug.com/1111111 external/wpt/new1.html [ Failure Timeout ]\n')
# baseline for external/wpt/new2.html
_raw_expected_text = (
'This is a testharness.js-based test.\n'
'FAIL a failed subtest\n'
'Harness: the test ran to completion.\n')
_raw_android_never_fix_tests = (
'# tags: [ android-weblayer android-webview chrome-android ]\n'
'# results: [ Skip ]\n'
'\n'
'# Add untriaged disabled tests in this block\n'
'crbug.com/1050754 [ android-webview ] external/wpt/disabled.html [ Skip ]\n')
def _setup_host(self, raw_android_expectations):
"""Returns a mock host with fake values set up for testing."""
self.set_logging_level(logging.DEBUG)
host = MockHost()
host.port_factory = MockPortFactory(host)
host.executive._output = ''
# Set up a fake list of try builders.
host.builders = BuilderList({
'MOCK Android Weblayer - Pie': {
'port_name': 'test-android-pie',
'specifiers': ['Precise', 'Release',
'anDroid', 'android_Weblayer'],
'is_try_builder': True,
},
})
host.filesystem.write_text_file(
host.port_factory.get().web_tests_dir() + '/external/' +
BASE_MANIFEST_NAME,
json.dumps({
'items': {
'testharness': {
'foo1.html': ['abcdef123', [None, {}]],
'foo2.html': ['abcdef123', [None, {}]],
'bar.html': ['abcdef123', [None, {}]],
},
},
}))
# Write dummy expectations
path = host.port_factory.get().web_tests_dir() + '/TestExpectations'
host.filesystem.write_text_file(
path, self._raw_baseline_expectations)
path = host.port_factory.get().web_tests_dir() + '/external/wpt/new2-expected.txt'
host.filesystem.write_text_file(
path, self._raw_expected_text)
for path in PRODUCTS_TO_EXPECTATION_FILE_PATHS.values():
host.filesystem.write_text_file(
path, raw_android_expectations)
host.filesystem.write_text_file(
ANDROID_DISABLED_TESTS, self._raw_android_never_fix_tests)
return host
def testUpdateTestExpectationsForWeblayer(self):
raw_android_expectations = (
'# results: [ Failure Crash Timeout]\n'
'\n'
'crbug.com/1000754 external/wpt/foo.html [ Failure ]\n'
'\n'
'# Add untriaged failures in this block\n'
'crbug.com/1050754 external/wpt/bar.html [ Failure ]\n'
'\n'
'# This comment will not be deleted\n')
host = self._setup_host(raw_android_expectations)
# Add results for Weblayer
# new1.html is covered by default expectations
# new2.html is covered by baseline
        # new3.html is a new test, so we should create a WebLayer expectation for it.
result = """
[{
"testId": "ninja://weblayer/shell/android:weblayer_shell_wpt/external/wpt/new1.html",
"variant": {
"def": {
"builder": "android-weblayer-pie-x86-wpt-fyi-rel",
"os": "Ubuntu-16.04",
"test_suite": "weblayer_shell_wpt"
}
},
"status": "FAIL"
},
{
"testId": "ninja://weblayer/shell/android:weblayer_shell_wpt/external/wpt/new2.html",
"variant": {
"def": {
"builder": "android-weblayer-pie-x86-wpt-fyi-rel",
"os": "Ubuntu-16.04",
"test_suite": "weblayer_shell_wpt"
}
},
"status": "FAIL"
},
{
"testId": "ninja://weblayer/shell/android:weblayer_shell_wpt/external/wpt/new3.html",
"variant": {
"def": {
"builder": "android-weblayer-pie-x86-wpt-fyi-rel",
"os": "Ubuntu-16.04",
"test_suite": "weblayer_shell_wpt"
}
},
"status": "CRASH"
},
{
"testId": "ninja://weblayer/shell/android:weblayer_shell_wpt/external/wpt/new3.html",
"variant": {
"def": {
"builder": "android-weblayer-pie-x86-wpt-fyi-rel",
"os": "Ubuntu-16.04",
"test_suite": "weblayer_shell_wpt"
}
},
"status": "FAIL"
}]"""
host.results_fetcher.set_results_to_resultdb(
Build('MOCK Android Weblayer - Pie', 123, '123'),
json.loads(result) * 3)
updater = AndroidWPTExpectationsUpdater(
host, ['-vvv', '--android-product', ANDROID_WEBLAYER,
'--include-unexpected-pass'])
updater.git_cl = MockGitCL(host, {
Build('MOCK Android Weblayer - Pie', 123, '123'):
TryJobStatus('COMPLETED', 'FAILURE')})
# Run command
updater.run()
# Get new expectations
content = host.filesystem.read_text_file(
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER])
_new_expectations = (
'# results: [ Failure Crash Timeout]\n'
'\n'
'crbug.com/1000754 external/wpt/foo.html [ Failure ]\n'
'\n'
'# Add untriaged failures in this block\n'
'crbug.com/1050754 external/wpt/bar.html [ Failure ]\n'
'crbug.com/1050754 external/wpt/new3.html [ Crash Failure ]\n'
'\n'
'# This comment will not be deleted\n')
self.assertEqual(content, _new_expectations)
# Check that ANDROID_DISABLED_TESTS expectation files were not changed
self.assertEqual(
self._raw_android_never_fix_tests,
host.filesystem.read_text_file(ANDROID_DISABLED_TESTS))
def testCleanupAndUpdateTestExpectationsForAll(self):
# Full integration test for expectations cleanup and update
# using builder results.
raw_android_expectations = (
'# results: [ Failure Crash Timeout]\n'
'\n'
'crbug.com/1000754 external/wpt/foo1.html [ Failure ]\n'
'crbug.com/1000754 external/wpt/foo2.html [ Failure ]\n'
'crbug.com/1000754 external/wpt/bar.html [ Failure ]\n'
'\n'
'# Add untriaged failures in this block\n'
'\n'
'# This comment will not be deleted\n')
host = self._setup_host(raw_android_expectations)
# Add results for Weblayer
result = """
{
"testId": "ninja://weblayer/shell/android:weblayer_shell_wpt/external/wpt/bar.html",
"variant": {
"def": {
"builder": "android-weblayer-pie-x86-wpt-fyi-rel",
"os": "Ubuntu-16.04",
"test_suite": "weblayer_shell_wpt"
}
},
"status": "CRASH"
}"""
host.results_fetcher.set_results_to_resultdb(
Build('MOCK Android Weblayer - Pie', 123, '123'),
[json.loads(result)] * 3)
updater = AndroidWPTExpectationsUpdater(
host, ['-vvv',
'--clean-up-test-expectations',
'--clean-up-affected-tests-only',
'--include-unexpected-pass',
'--android-product', ANDROID_WEBLAYER])
def _git_command_return_val(cmd):
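            # Fake `git diff --diff-filter` output: D = deleted files, R = renamed, M = modified.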
if '--diff-filter=D' in cmd:
return 'external/wpt/foo2.html'
if '--diff-filter=R' in cmd:
return 'C\texternal/wpt/foo1.html\texternal/wpt/foo3.html'
if '--diff-filter=M' in cmd:
return 'external/wpt/bar.html'
return ''
updater.git_cl = MockGitCL(host, {
Build('MOCK Android Weblayer - Pie', 123, '123'):
TryJobStatus('COMPLETED', 'FAILURE')})
updater.git.run = _git_command_return_val
updater._relative_to_web_test_dir = lambda test_path: test_path
# Run command
updater.run()
# Check expectations for weblayer
content = host.filesystem.read_text_file(
PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER])
_new_expectations = (
'# results: [ Failure Crash Timeout]\n'
'\n'
'crbug.com/1000754 external/wpt/foo3.html [ Failure ]\n'
'\n'
'# Add untriaged failures in this block\n'
'crbug.com/1050754 external/wpt/bar.html [ Crash ]\n'
'\n'
'# This comment will not be deleted\n')
self.assertEqual(content, _new_expectations)
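# Hedged convenience sketch (not used by the tests above): the same ResultDB record
# shape that the tests hand-write as JSON, built as a dict. The helper name and the
# default builder value simply mirror the literals in the fixtures above.
def _make_weblayer_result(test_name, status,
                          builder='android-weblayer-pie-x86-wpt-fyi-rel'):
    return {
        'testId': ('ninja://weblayer/shell/android:weblayer_shell_wpt/'
                   'external/wpt/' + test_name),
        'variant': {
            'def': {
                'builder': builder,
                'os': 'Ubuntu-16.04',
                'test_suite': 'weblayer_shell_wpt',
            },
        },
        'status': status,
    }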
"""
SMARTctl - command ``/sbin/smartctl -a {device}``
=================================================
"""
from insights.core import Parser
from insights.core.plugins import parser
from insights.parsers import ParseException
import re
from insights.specs import smartctl
@parser(smartctl)
class SMARTctl(Parser):
"""
Parser for output of ``smartctl -a`` for each drive in system.
This stores the information from the output of `smartctl` in the
following properties:
    * ``device`` - the full device path as passed to smartctl - e.g. /dev/sda
* ``information`` - the -i info (vendor, product, etc)
* ``health`` - overall health assessment (-H)
* ``values`` - the SMART values (-c) - SMART config on drive firmware
* ``attributes`` - the SMART attributes (-A) - run time data
For legacy access, these are also available as values in the ``info``
dictionary property, keyed to their name (i.e. info['device'])
Each object contains a different device; the shared information for this
parser in Insights will be one or more devices, so see the example below
for how to iterate through the available SMARTctl information for each
device.
Sample (abbreviated) output::
smartctl 6.2 2013-07-26 r3841 [x86_64-linux-3.10.0-267.el7.x86_64] (local build)
Copyright (C) 2002-13, Bruce Allen, Christian Franke, www.smartmontools.org
=== START OF INFORMATION SECTION ===
Device Model: ST500LM021-1KJ152
Serial Number: W620AT02
LU WWN Device Id: 5 000c50 07817bb36
...
=== START OF READ SMART DATA SECTION ===
SMART overall-health self-assessment test result: PASSED
General SMART Values:
Offline data collection status: (0x00) Offline data collection activity
was never started.
Auto Offline Data Collection: Disabled.
...
SMART Attributes Data Structure revision number: 10
Vendor Specific SMART Attributes with Thresholds:
ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
1 Raw_Read_Error_Rate 0x000f 118 099 034 Pre-fail Always - 179599704
3 Spin_Up_Time 0x0003 098 098 000 Pre-fail Always - 0
4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 546
5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 0
...
Examples:
>>> for drive in shared[SMARTctl]:
... print "Device:", drive.device
... print "Model:", drive.information['Device Model']
... print "Health check:", drive.health
... print "Last self-test status:", drive.values['Self-test execution status']
... print "Raw read error rate:", drive.attributes['Raw_Read_Error_Rate']['RAW_VALUE']
...
Device: /dev/sda
Model: ST500LM021-1KJ152
Health check: PASSED
Last self-test status: 0
Raw read error rate: 179599704
"""
_INFO_LINE_STR = r'(?P<key>\w+(?:\s\w+)*):\s+' + \
r'(?P<value>\S.*?)\s*$'
_INFO_LINE_RE = re.compile(_INFO_LINE_STR)
_VALUE_LINE_STR = r'(?P<key>\w[A-Za-z _.-]+):\s+' + \
r'\(\s*(?P<value>\S.*?)\)'
_VALUE_LINE_RE = re.compile(_VALUE_LINE_STR)
_ATTR_LINE_STR = r'^\s*(?P<id>\d+)\s(?P<name>\w+)\s+' + \
r'(?P<flag>0x[0-9a-fA-F]{4})\s+(?P<value>\d{3})\s+' + \
r'(?P<worst>\d{3})\s+(?P<threshold>\d{3})\s+' + \
r'(?P<type>[A-Za-z_-]+)\s+(?P<updated>[A-Za-z_-]+)\s+' + \
r'(?P<when_failed>\S+)\s+(?P<raw_value>\S.*)$'
_ATTR_LINE_RE = re.compile(_ATTR_LINE_STR)
def __init__(self, context):
filename_re = re.compile(r'smartctl_-a_\.dev\.(?P<device>\w+)$')
match = filename_re.search(context.path)
if match:
self.device = '/dev/' + match.group('device')
else:
raise ParseException('Cannot parse device name from path {p}'.format(p=context.path))
super(SMARTctl, self).__init__(context)
def parse_content(self, content):
self.information = {}
self.health = 'not parsed'
self.values = {}
self.attributes = {}
# hack for persistent line storage in parse_content context -
# otherwise it gets treated as a local variable within the sub-
# functions
self.full_line = ''
# Parsing using a state machine, sorry. We use a state variable, and
# functions to parse lines in each of the different states. The
# function returns the state as a result of reading that line, and we
# look up the parse function out of an array based on the parse state.
PARSE_FORMATTED_INFO = 0
PARSE_FREEFORM_INFO = 1
PARSE_ATTRIBUTE_INFO = 2
PARSE_COMPLETE = 3
parse_state = PARSE_FORMATTED_INFO
# Information section:
def parse_information(line):
# Exit parsing information section if we go into the next section
if line.startswith('=== START OF READ SMART DATA SECTION ==='):
return PARSE_FREEFORM_INFO
match = self._INFO_LINE_RE.search(line)
if match:
self.information[match.group('key')] = match.group('value')
else:
# Translate some of the less structured information
if line == 'Device does not support SMART':
self.information['SMART support is'] = 'Not supported'
elif line == 'Device supports SMART and is Enabled':
self.information['SMART support is'] = 'Enabled'
elif line == 'Error Counter logging not supported':
self.information['Error Counter logging'] = \
'Not supported'
elif line == 'Device does not support Self Test logging':
self.information['Self Test logging'] = 'Not supported'
elif line == 'Temperature Warning Disabled or Not Supported':
self.information['Temperature Warning'] = \
'Disabled or Not Supported'
return PARSE_FORMATTED_INFO
# Values section:
def parse_values(line):
if line.startswith('Vendor Specific SMART Attributes with Thres'):
return PARSE_ATTRIBUTE_INFO
if line.startswith('SMART overall-health self-assessment test r'):
self.health = ''.join((line.split(': '))[1:])
return PARSE_FREEFORM_INFO
# Values section begins with this - ignore:
if line.startswith('General SMART Values:'):
return PARSE_FREEFORM_INFO
# Lines starting with a space are continuations of the commentary
# on the previous setting - ignore
if len(line) == 0 or line[0] == ' ' or line[0] == "\t":
return PARSE_FREEFORM_INFO
# Otherwise, join this line to the full line
if self.full_line:
self.full_line += ' '
self.full_line += line.strip()
match = self._VALUE_LINE_RE.search(self.full_line)
if match:
# Handle the recommended polling time lines, which are joined
# with the previous line and values are in minutes.
(key, value) = match.group('key', 'value')
self.values[key] = value
self.full_line = ''
elif self.full_line.startswith('SMART Attributes Data Structure revision number: '):
(key, value) = self.full_line.split(': ')
self.values[key] = value
self.full_line = ''
return PARSE_FREEFORM_INFO
# Attributes sections
def parse_attributes(line):
if line.startswith('SMART Error Log Version:'):
return PARSE_COMPLETE
if len(line) == 0:
return PARSE_ATTRIBUTE_INFO
match = self._ATTR_LINE_RE.match(line)
if match:
name = match.group('name')
self.attributes[name] = match.groupdict()
return PARSE_ATTRIBUTE_INFO
parse_for_state = [
parse_information,
parse_values,
parse_attributes,
]
for line in content:
parse_state = parse_for_state[parse_state](line)
if parse_state == PARSE_COMPLETE:
break
# Delete temporary full line storage
del self.full_line
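# Illustrative sketch (not part of the parser): how the class-level attribute regex
# decomposes one row of the 'Vendor Specific SMART Attributes' table. The sample line
# is re-typed from the abbreviated output shown in the class docstring.
def _demo_attribute_regex():
    sample = ('  1 Raw_Read_Error_Rate     0x000f   118   099   034    '
              'Pre-fail  Always       -       179599704')
    match = SMARTctl._ATTR_LINE_RE.match(sample)
    # groupdict() yields e.g. {'id': '1', 'name': 'Raw_Read_Error_Rate',
    # 'flag': '0x000f', 'value': '118', ..., 'raw_value': '179599704'}
    return match.groupdict() if match else None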
"""Test service helpers."""
from collections import OrderedDict
from copy import deepcopy
import unittest
from unittest.mock import AsyncMock, Mock, patch
import pytest
import voluptuous as vol
# To prevent circular import when running just this file
from homeassistant import core as ha, exceptions
from homeassistant.auth.permissions import PolicyPermissions
import homeassistant.components # noqa: F401, pylint: disable=unused-import
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers import (
device_registry as dev_reg,
entity_registry as ent_reg,
service,
template,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_setup_component
from tests.common import (
MockEntity,
get_test_home_assistant,
mock_device_registry,
mock_registry,
mock_service,
)
SUPPORT_A = 1
SUPPORT_B = 2
SUPPORT_C = 4
@pytest.fixture
def mock_handle_entity_call():
"""Mock service platform call."""
with patch(
"homeassistant.helpers.service._handle_entity_call",
return_value=None,
) as mock_call:
yield mock_call
@pytest.fixture
def mock_entities(hass):
"""Return mock entities in an ordered dict."""
kitchen = MockEntity(
entity_id="light.kitchen",
available=True,
should_poll=False,
supported_features=SUPPORT_A,
)
living_room = MockEntity(
entity_id="light.living_room",
available=True,
should_poll=False,
supported_features=SUPPORT_B,
)
bedroom = MockEntity(
entity_id="light.bedroom",
available=True,
should_poll=False,
supported_features=(SUPPORT_A | SUPPORT_B),
)
bathroom = MockEntity(
entity_id="light.bathroom",
available=True,
should_poll=False,
supported_features=(SUPPORT_B | SUPPORT_C),
)
entities = OrderedDict()
entities[kitchen.entity_id] = kitchen
entities[living_room.entity_id] = living_room
entities[bedroom.entity_id] = bedroom
entities[bathroom.entity_id] = bathroom
return entities
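# Feature bitmasks carried by the mock entities above, as exercised by the
# required_features tests below:
#   light.kitchen      SUPPORT_A (1)
#   light.living_room  SUPPORT_B (2)
#   light.bedroom      SUPPORT_A | SUPPORT_B (3)
#   light.bathroom     SUPPORT_B | SUPPORT_C (6)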
@pytest.fixture
def area_mock(hass):
"""Mock including area info."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
hass.states.async_set("light.Kitchen", STATE_OFF)
device_in_area = dev_reg.DeviceEntry(area_id="test-area")
device_no_area = dev_reg.DeviceEntry(id="device-no-area-id")
device_diff_area = dev_reg.DeviceEntry(area_id="diff-area")
device_area_a = dev_reg.DeviceEntry(id="device-area-a-id", area_id="area-a")
mock_device_registry(
hass,
{
device_in_area.id: device_in_area,
device_no_area.id: device_no_area,
device_diff_area.id: device_diff_area,
device_area_a.id: device_area_a,
},
)
entity_in_own_area = ent_reg.RegistryEntry(
entity_id="light.in_own_area",
unique_id="in-own-area-id",
platform="test",
area_id="own-area",
)
entity_in_area = ent_reg.RegistryEntry(
entity_id="light.in_area",
unique_id="in-area-id",
platform="test",
device_id=device_in_area.id,
)
entity_in_other_area = ent_reg.RegistryEntry(
entity_id="light.in_other_area",
unique_id="in-area-a-id",
platform="test",
device_id=device_in_area.id,
area_id="other-area",
)
entity_assigned_to_area = ent_reg.RegistryEntry(
entity_id="light.assigned_to_area",
unique_id="assigned-area-id",
platform="test",
device_id=device_in_area.id,
area_id="test-area",
)
entity_no_area = ent_reg.RegistryEntry(
entity_id="light.no_area",
unique_id="no-area-id",
platform="test",
device_id=device_no_area.id,
)
entity_diff_area = ent_reg.RegistryEntry(
entity_id="light.diff_area",
unique_id="diff-area-id",
platform="test",
device_id=device_diff_area.id,
)
entity_in_area_a = ent_reg.RegistryEntry(
entity_id="light.in_area_a",
unique_id="in-area-a-id",
platform="test",
device_id=device_area_a.id,
area_id="area-a",
)
entity_in_area_b = ent_reg.RegistryEntry(
entity_id="light.in_area_b",
unique_id="in-area-b-id",
platform="test",
device_id=device_area_a.id,
area_id="area-b",
)
mock_registry(
hass,
{
entity_in_own_area.entity_id: entity_in_own_area,
entity_in_area.entity_id: entity_in_area,
entity_in_other_area.entity_id: entity_in_other_area,
entity_assigned_to_area.entity_id: entity_assigned_to_area,
entity_no_area.entity_id: entity_no_area,
entity_diff_area.entity_id: entity_diff_area,
entity_in_area_a.entity_id: entity_in_area_a,
entity_in_area_b.entity_id: entity_in_area_b,
},
)
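# Resulting area topology used by the extraction tests below:
#   own-area   -> light.in_own_area (entity-level area, no device)
#   test-area  -> light.in_area, light.assigned_to_area (via device_in_area)
#   other-area -> light.in_other_area (entity-level override on device_in_area)
#   diff-area  -> light.diff_area (via device_diff_area)
#   area-a     -> light.in_area_a; area-b -> light.in_area_b (both on device_area_a)
#   no area    -> light.no_area (on the "device-no-area-id" device)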
class TestServiceHelpers(unittest.TestCase):
"""Test the Home Assistant service helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = mock_service(self.hass, "test_domain", "test_service")
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_service_call(self):
"""Test service call with templating."""
config = {
"service": "{{ 'test_domain.test_service' }}",
"entity_id": "hello.world",
"data": {
"hello": "{{ 'goodbye' }}",
"effect": {"value": "{{ 'complex' }}", "simple": "simple"},
},
"data_template": {"list": ["{{ 'list' }}", "2"]},
"target": {"area_id": "test-area-id", "entity_id": "will.be_overridden"},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert dict(self.calls[0].data) == {
"hello": "goodbye",
"effect": {
"value": "complex",
"simple": "simple",
},
"list": ["list", "2"],
"entity_id": ["hello.world"],
"area_id": ["test-area-id"],
}
config = {
"service": "{{ 'test_domain.test_service' }}",
"target": {
"area_id": ["area-42", "{{ 'area-51' }}"],
"device_id": ["abcdef", "{{ 'fedcba' }}"],
"entity_id": ["light.static", "{{ 'light.dynamic' }}"],
},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert dict(self.calls[1].data) == {
"area_id": ["area-42", "area-51"],
"device_id": ["abcdef", "fedcba"],
"entity_id": ["light.static", "light.dynamic"],
}
config = {
"service": "{{ 'test_domain.test_service' }}",
"target": "{{ var_target }}",
}
service.call_from_config(
self.hass,
config,
variables={
"var_target": {
"entity_id": "light.static",
"area_id": ["area-42", "area-51"],
},
},
)
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert dict(self.calls[2].data) == {
"area_id": ["area-42", "area-51"],
"entity_id": ["light.static"],
}
def test_service_template_service_call(self):
"""Test legacy service_template call with templating."""
config = {
"service_template": "{{ 'test_domain.test_service' }}",
"entity_id": "hello.world",
"data": {"hello": "goodbye"},
}
service.call_from_config(self.hass, config)
self.hass.block_till_done()
assert self.calls[0].data["hello"] == "goodbye"
def test_passing_variables_to_templates(self):
"""Test passing variables to templates."""
config = {
"service_template": "{{ var_service }}",
"entity_id": "hello.world",
"data_template": {"hello": "{{ var_data }}"},
}
service.call_from_config(
self.hass,
config,
variables={
"var_service": "test_domain.test_service",
"var_data": "goodbye",
},
)
self.hass.block_till_done()
assert self.calls[0].data["hello"] == "goodbye"
def test_bad_template(self):
"""Test passing bad template."""
config = {
"service_template": "{{ var_service }}",
"entity_id": "hello.world",
"data_template": {"hello": "{{ states + unknown_var }}"},
}
service.call_from_config(
self.hass,
config,
variables={
"var_service": "test_domain.test_service",
"var_data": "goodbye",
},
)
self.hass.block_till_done()
assert len(self.calls) == 0
def test_split_entity_string(self):
"""Test splitting of entity string."""
service.call_from_config(
self.hass,
{
"service": "test_domain.test_service",
"entity_id": "hello.world, sensor.beer",
},
)
self.hass.block_till_done()
assert ["hello.world", "sensor.beer"] == self.calls[-1].data.get("entity_id")
def test_not_mutate_input(self):
"""Test for immutable input."""
config = cv.SERVICE_SCHEMA(
{
"service": "test_domain.test_service",
"entity_id": "hello.world, sensor.beer",
"data": {"hello": 1},
"data_template": {"nested": {"value": "{{ 1 + 1 }}"}},
}
)
orig = deepcopy(config)
        # The only change after the call is each template getting hass attached
template.attach(self.hass, orig)
service.call_from_config(self.hass, config, validate_config=False)
assert orig == config
@patch("homeassistant.helpers.service._LOGGER.error")
def test_fail_silently_if_no_service(self, mock_log):
"""Test failing if service is missing."""
service.call_from_config(self.hass, None)
assert mock_log.call_count == 1
service.call_from_config(self.hass, {})
assert mock_log.call_count == 2
service.call_from_config(self.hass, {"service": "invalid"})
assert mock_log.call_count == 3
async def test_extract_entity_ids(hass):
"""Test extract_entity_ids method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
hass.states.async_set("light.Kitchen", STATE_OFF)
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await hass.components.group.Group.async_create_group(
hass, "test", ["light.Ceiling", "light.Kitchen"]
)
call = ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: "light.Bowl"})
assert {"light.bowl"} == await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: "group.test"})
assert {"light.ceiling", "light.kitchen"} == await service.async_extract_entity_ids(
hass, call
)
assert {"group.test"} == await service.async_extract_entity_ids(
hass, call, expand_group=False
)
assert (
await service.async_extract_entity_ids(
hass,
ha.ServiceCall("light", "turn_on", {ATTR_ENTITY_ID: ENTITY_MATCH_NONE}),
)
== set()
)
async def test_extract_entity_ids_from_area(hass, area_mock):
"""Test extract_entity_ids method with areas."""
call = ha.ServiceCall("light", "turn_on", {"area_id": "own-area"})
assert {
"light.in_own_area",
} == await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall("light", "turn_on", {"area_id": "test-area"})
assert {
"light.in_area",
"light.assigned_to_area",
} == await service.async_extract_entity_ids(hass, call)
call = ha.ServiceCall("light", "turn_on", {"area_id": ["test-area", "diff-area"]})
assert {
"light.in_area",
"light.diff_area",
"light.assigned_to_area",
} == await service.async_extract_entity_ids(hass, call)
assert (
await service.async_extract_entity_ids(
hass, ha.ServiceCall("light", "turn_on", {"area_id": ENTITY_MATCH_NONE})
)
== set()
)
async def test_extract_entity_ids_from_devices(hass, area_mock):
"""Test extract_entity_ids method with devices."""
assert await service.async_extract_entity_ids(
hass, ha.ServiceCall("light", "turn_on", {"device_id": "device-no-area-id"})
) == {
"light.no_area",
}
assert await service.async_extract_entity_ids(
hass, ha.ServiceCall("light", "turn_on", {"device_id": "device-area-a-id"})
) == {
"light.in_area_a",
"light.in_area_b",
}
assert (
await service.async_extract_entity_ids(
hass, ha.ServiceCall("light", "turn_on", {"device_id": "non-existing-id"})
)
== set()
)
async def test_async_get_all_descriptions(hass):
"""Test async_get_all_descriptions."""
group = hass.components.group
group_config = {group.DOMAIN: {}}
await async_setup_component(hass, group.DOMAIN, group_config)
descriptions = await service.async_get_all_descriptions(hass)
assert len(descriptions) == 1
assert "description" in descriptions["group"]["reload"]
assert "fields" in descriptions["group"]["reload"]
logger = hass.components.logger
logger_config = {logger.DOMAIN: {}}
await async_setup_component(hass, logger.DOMAIN, logger_config)
descriptions = await service.async_get_all_descriptions(hass)
assert len(descriptions) == 2
assert "description" in descriptions[logger.DOMAIN]["set_level"]
assert "fields" in descriptions[logger.DOMAIN]["set_level"]
async def test_call_with_required_features(hass, mock_entities):
"""Test service calls invoked only if entity has required features."""
test_service_mock = AsyncMock(return_value=None)
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
required_features=[SUPPORT_A],
)
assert test_service_mock.call_count == 2
expected = [
mock_entities["light.kitchen"],
mock_entities["light.bedroom"],
]
actual = [call[0][0] for call in test_service_mock.call_args_list]
assert all(entity in actual for entity in expected)
async def test_call_with_both_required_features(hass, mock_entities):
"""Test service calls invoked only if entity has both features."""
test_service_mock = AsyncMock(return_value=None)
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
required_features=[SUPPORT_A | SUPPORT_B],
)
assert test_service_mock.call_count == 1
assert [call[0][0] for call in test_service_mock.call_args_list] == [
mock_entities["light.bedroom"]
]
async def test_call_with_one_of_required_features(hass, mock_entities):
"""Test service calls invoked with one entity having the required features."""
test_service_mock = AsyncMock(return_value=None)
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
required_features=[SUPPORT_A, SUPPORT_C],
)
assert test_service_mock.call_count == 3
expected = [
mock_entities["light.kitchen"],
mock_entities["light.bedroom"],
mock_entities["light.bathroom"],
]
actual = [call[0][0] for call in test_service_mock.call_args_list]
assert all(entity in actual for entity in expected)
async def test_call_with_sync_func(hass, mock_entities):
"""Test invoking sync service calls."""
test_service_mock = Mock(return_value=None)
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
test_service_mock,
ha.ServiceCall("test_domain", "test_service", {"entity_id": "light.kitchen"}),
)
assert test_service_mock.call_count == 1
async def test_call_with_sync_attr(hass, mock_entities):
"""Test invoking sync service calls."""
mock_method = mock_entities["light.kitchen"].sync_method = Mock(return_value=None)
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
"sync_method",
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen", "area_id": "abcd"},
),
)
assert mock_method.call_count == 1
# We pass empty kwargs because both entity_id and area_id are filtered out
assert mock_method.mock_calls[0][2] == {}
async def test_call_context_user_not_exist(hass):
"""Check we don't allow deleted users to do things."""
with pytest.raises(exceptions.UnknownUser) as err:
await service.entity_service_call(
hass,
[],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
context=ha.Context(user_id="non-existing"),
),
)
assert err.value.context.user_id == "non-existing"
async def test_call_context_target_all(hass, mock_handle_entity_call, mock_entities):
"""Check we only target allowed entities if targeting all."""
with patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=Mock(
permissions=PolicyPermissions(
{"entities": {"entity_ids": {"light.kitchen": True}}}, None
)
),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
data={"entity_id": ENTITY_MATCH_ALL},
context=ha.Context(user_id="mock-id"),
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_context_target_specific(
hass, mock_handle_entity_call, mock_entities
):
"""Check targeting specific entities."""
with patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=Mock(
permissions=PolicyPermissions(
{"entities": {"entity_ids": {"light.kitchen": True}}}, None
)
),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen"},
context=ha.Context(user_id="mock-id"),
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_context_target_specific_no_auth(
hass, mock_handle_entity_call, mock_entities
):
"""Check targeting specific entities without auth."""
with pytest.raises(exceptions.Unauthorized) as err, patch(
"homeassistant.auth.AuthManager.async_get_user",
return_value=Mock(permissions=PolicyPermissions({}, None)),
):
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": "light.kitchen"},
context=ha.Context(user_id="mock-id"),
),
)
assert err.value.context.user_id == "mock-id"
assert err.value.entity_id == "light.kitchen"
async def test_call_no_context_target_all(hass, mock_handle_entity_call, mock_entities):
"""Check we target all if no user context given."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain", "test_service", data={"entity_id": ENTITY_MATCH_ALL}
),
)
assert len(mock_handle_entity_call.mock_calls) == 4
assert [call[1][1] for call in mock_handle_entity_call.mock_calls] == list(
mock_entities.values()
)
async def test_call_no_context_target_specific(
hass, mock_handle_entity_call, mock_entities
):
"""Check we can target specified entities."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall(
"test_domain",
"test_service",
{"entity_id": ["light.kitchen", "light.non-existing"]},
),
)
assert len(mock_handle_entity_call.mock_calls) == 1
assert mock_handle_entity_call.mock_calls[0][1][1].entity_id == "light.kitchen"
async def test_call_with_match_all(
hass, mock_handle_entity_call, mock_entities, caplog
):
"""Check we only target allowed entities if targeting all."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall("test_domain", "test_service", {"entity_id": "all"}),
)
assert len(mock_handle_entity_call.mock_calls) == 4
assert [call[1][1] for call in mock_handle_entity_call.mock_calls] == list(
mock_entities.values()
)
async def test_call_with_omit_entity_id(hass, mock_handle_entity_call, mock_entities):
"""Check service call if we do not pass an entity ID."""
await service.entity_service_call(
hass,
[Mock(entities=mock_entities)],
Mock(),
ha.ServiceCall("test_domain", "test_service"),
)
assert len(mock_handle_entity_call.mock_calls) == 0
async def test_register_admin_service(hass, hass_read_only_user, hass_admin_user):
"""Test the register admin service."""
calls = []
async def mock_service(call):
calls.append(call)
hass.helpers.service.async_register_admin_service("test", "test", mock_service)
hass.helpers.service.async_register_admin_service(
"test",
"test2",
mock_service,
vol.Schema({vol.Required("required"): cv.boolean}),
)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
"test",
"test",
{},
blocking=True,
context=ha.Context(user_id="non-existing"),
)
assert len(calls) == 0
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
"test",
"test",
{},
blocking=True,
context=ha.Context(user_id=hass_read_only_user.id),
)
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"test",
"test",
{"invalid": True},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 0
with pytest.raises(vol.Invalid):
await hass.services.async_call(
"test",
"test2",
{},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 0
await hass.services.async_call(
"test",
"test2",
{"required": True},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 1
assert calls[0].context.user_id == hass_admin_user.id
async def test_domain_control_not_async(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with pytest.raises(exceptions.HomeAssistantError):
hass.helpers.service.verify_domain_control("test_domain")(mock_service_log)
async def test_domain_control_unknown(hass, mock_entities):
"""Test domain verification in a service call with an unknown user."""
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
with patch(
"homeassistant.helpers.entity_registry.async_get_registry",
return_value=Mock(entities=mock_entities),
):
protected_mock_service = hass.helpers.service.verify_domain_control(
"test_domain"
)(mock_service_log)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
with pytest.raises(exceptions.UnknownUser):
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id="fake_user_id"),
)
assert len(calls) == 0
async def test_domain_control_unauthorized(hass, hass_read_only_user):
"""Test domain verification in a service call with an unauthorized user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen",
unique_id="kitchen",
platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
with pytest.raises(exceptions.Unauthorized):
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=hass_read_only_user.id),
)
assert len(calls) == 0
async def test_domain_control_admin(hass, hass_admin_user):
"""Test domain verification in a service call with an admin user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen",
unique_id="kitchen",
platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=hass_admin_user.id),
)
assert len(calls) == 1
async def test_domain_control_no_user(hass):
"""Test domain verification in a service call with no user."""
mock_registry(
hass,
{
"light.kitchen": ent_reg.RegistryEntry(
entity_id="light.kitchen",
unique_id="kitchen",
platform="test_domain",
)
},
)
calls = []
async def mock_service_log(call):
"""Define a protected service."""
calls.append(call)
protected_mock_service = hass.helpers.service.verify_domain_control("test_domain")(
mock_service_log
)
hass.services.async_register(
"test_domain", "test_service", protected_mock_service, schema=None
)
await hass.services.async_call(
"test_domain",
"test_service",
{},
blocking=True,
context=ha.Context(user_id=None),
)
assert len(calls) == 1
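# Hedged sketch (not one of the tests): the decorator pattern exercised by the
# verify_domain_control tests above, written as an integration would use it.
# "my_domain" and the empty handler body are placeholders.
async def _example_protected_service_setup(hass):
    async def handle_reload(call):
        """Handle a protected reload call."""
    protected = hass.helpers.service.verify_domain_control("my_domain")(handle_reload)
    hass.services.async_register("my_domain", "reload", protected, schema=None)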
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2", available=False),
MockEntity(name="test_3", entity_id="test_domain.test_3"),
MockEntity(name="test_4", entity_id="test_domain.test_4", available=False),
]
call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL})
assert ["test_domain.test_1", "test_domain.test_3"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call_1))
]
call_2 = ha.ServiceCall(
"test",
"service",
data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]},
)
assert ["test_domain.test_3"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call_2))
]
assert (
await service.async_extract_entities(
hass,
entities,
ha.ServiceCall(
"test",
"service",
data={"entity_id": ENTITY_MATCH_NONE},
),
)
== []
)
async def test_extract_from_service_empty_if_no_entity_id(hass):
"""Test the extraction from service without specifying entity."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2"),
]
call = ha.ServiceCall("test", "service")
assert [] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call))
]
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
entities = [
MockEntity(name="test_1", entity_id="test_domain.test_1"),
MockEntity(name="test_2", entity_id="test_domain.test_2"),
]
call = ha.ServiceCall(
"test",
"service",
{"entity_id": ["test_domain.test_2", "test_domain.non_exist"]},
)
assert ["test_domain.test_2"] == [
ent.entity_id
for ent in (await service.async_extract_entities(hass, entities, call))
]
async def test_extract_from_service_area_id(hass, area_mock):
"""Test the extraction using area ID as reference."""
entities = [
MockEntity(name="in_area", entity_id="light.in_area"),
MockEntity(name="no_area", entity_id="light.no_area"),
MockEntity(name="diff_area", entity_id="light.diff_area"),
]
call = ha.ServiceCall("light", "turn_on", {"area_id": "test-area"})
extracted = await service.async_extract_entities(hass, entities, call)
assert len(extracted) == 1
assert extracted[0].entity_id == "light.in_area"
call = ha.ServiceCall("light", "turn_on", {"area_id": ["test-area", "diff-area"]})
extracted = await service.async_extract_entities(hass, entities, call)
assert len(extracted) == 2
assert sorted(ent.entity_id for ent in extracted) == [
"light.diff_area",
"light.in_area",
]
call = ha.ServiceCall(
"light",
"turn_on",
{"area_id": ["test-area", "diff-area"], "device_id": "device-no-area-id"},
)
extracted = await service.async_extract_entities(hass, entities, call)
assert len(extracted) == 3
assert sorted(ent.entity_id for ent in extracted) == [
"light.diff_area",
"light.in_area",
"light.no_area",
]
async def test_entity_service_call_warn_referenced(hass, caplog):
"""Test we only warn for referenced entities in entity_service_call."""
call = ha.ServiceCall(
"light",
"turn_on",
{
"area_id": "non-existent-area",
"entity_id": "non.existent",
"device_id": "non-existent-device",
},
)
await service.entity_service_call(hass, {}, "", call)
assert (
"Unable to find referenced areas non-existent-area, devices non-existent-device, entities non.existent"
in caplog.text
)
async def test_async_extract_entities_warn_referenced(hass, caplog):
"""Test we only warn for referenced entities in async_extract_entities."""
call = ha.ServiceCall(
"light",
"turn_on",
{
"area_id": "non-existent-area",
"entity_id": "non.existent",
"device_id": "non-existent-device",
},
)
extracted = await service.async_extract_entities(hass, {}, call)
assert len(extracted) == 0
assert (
"Unable to find referenced areas non-existent-area, devices non-existent-device, entities non.existent"
in caplog.text
)
async def test_async_extract_config_entry_ids(hass):
"""Test we can find devices that have no entities."""
device_no_entities = dev_reg.DeviceEntry(
id="device-no-entities", config_entries={"abc"}
)
call = ha.ServiceCall(
"homeassistant",
"reload_config_entry",
{
"device_id": "device-no-entities",
},
)
mock_device_registry(
hass,
{
device_no_entities.id: device_no_entities,
},
)
assert await service.async_extract_config_entry_ids(hass, call) == {"abc"}
|
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
#-# Modified by D.C.-G. for translation purpose
import traceback
from OpenGL import GL
import numpy
from numpy import newaxis
from albow import Label, ValueDisplay, AttrRef, Button, Column, ask, Row, alert, Widget, Menu, showProgress, \
ChoiceButton, IntInputRow, CheckBoxLabel
from albow.translate import _
from editortools.editortool import EditorTool
from glbackground import Panel
from glutils import DisplayList, gl
from mceutils import alertException, setWindowCaption
import mcplatform
import pymclevel
from pymclevel.minecraft_server import MCServerChunkGenerator
from config import config
from albow.dialogs import Dialog
import renderer
class ChunkToolPanel(Panel):
def __init__(self, tool, *a, **kw):
if 'name' not in kw.keys():
kw['name'] = 'Panel.ChunkToolPanel'
Panel.__init__(self, *a, **kw)
self.tool = tool
self.anchor = "whl"
chunkToolLabel = Label("Selected Chunks:")
self.chunksLabel = ValueDisplay(ref=AttrRef(self, 'chunkSizeText'), width=115)
self.chunksLabel.align = "c"
self.chunksLabel.tooltipText = "..."
deselectButton = Button("Deselect",
tooltipText=None,
action=tool.editor.deselect,
)
createButton = Button("Create")
createButton.tooltipText = "Create new chunks within the selection."
createButton.action = tool.createChunks
createButton.highlight_color = (0, 255, 0)
destroyButton = Button("Delete")
destroyButton.tooltipText = "Delete the selected chunks from disk. Minecraft will recreate them the next time you are near."
destroyButton.action = tool.destroyChunks
pruneButton = Button("Prune")
pruneButton.tooltipText = "Prune the world, leaving only the selected chunks. Any chunks outside of the selection will be removed, and empty region files will be deleted from disk"
pruneButton.action = tool.pruneChunks
relightButton = Button("Relight")
relightButton.tooltipText = "Recalculate light values across the selected chunks"
relightButton.action = tool.relightChunks
relightButton.highlight_color = (255, 255, 255)
repopButton = Button("Repop")
repopButton.tooltipText = "Mark the selected chunks for repopulation. The next time you play Minecraft, the chunks will have trees, ores, and other features regenerated."
repopButton.action = tool.repopChunks
repopButton.highlight_color = (255, 200, 155)
dontRepopButton = Button("Don't Repop")
dontRepopButton.tooltipText = "Unmark the selected chunks. They will not repopulate the next time you play the game."
dontRepopButton.action = tool.dontRepopChunks
dontRepopButton.highlight_color = (255, 255, 255)
col = Column((
chunkToolLabel, self.chunksLabel, deselectButton, createButton, destroyButton, pruneButton, relightButton,
repopButton, dontRepopButton))
# col.right = self.width - 10;
self.width = col.width
self.height = col.height
#self.width = 120
self.add(col)
@property
def chunkSizeText(self):
return _("{0} chunks").format(len(self.tool.selectedChunks()))
def updateText(self):
pass
# self.chunksLabel.text = self.chunksLabelText()
class ChunkTool(EditorTool):
toolIconName = "chunk"
tooltipText = "Chunk Control"
@property
def statusText(self):
return _("Click and drag to select chunks. Hold {0} to deselect chunks. Hold {1} to select chunks.").format(_(config.keys.deselectChunks.get()), _(config.keys.selectChunks.get()))
def toolEnabled(self):
return isinstance(self.editor.level, pymclevel.ChunkedLevelMixin)
_selectedChunks = None
_displayList = None
def drawToolMarkers(self):
if self._displayList is None:
self._displayList = DisplayList(self._drawToolMarkers)
# print len(self._selectedChunks) if self._selectedChunks else None, "!=", len(self.editor.selectedChunks)
if self._selectedChunks != self.editor.selectedChunks or True: # xxx # TODO Pod
self._selectedChunks = set(self.editor.selectedChunks)
self._displayList.invalidate()
self._displayList.call()
def _drawToolMarkers(self):
lines = (
((-1, 0), (0, 0, 0, 1), []),
((1, 0), (1, 0, 1, 1), []),
((0, -1), (0, 0, 1, 0), []),
((0, 1), (0, 1, 1, 1), []),
)
for ch in self._selectedChunks:
cx, cz = ch
for (dx, dz), points, positions in lines:
n = (cx + dx, cz + dz)
if n not in self._selectedChunks:
positions.append([ch])
color = self.editor.selectionTool.selectionColor + (0.3, )
GL.glColor(*color)
with gl.glEnable(GL.GL_BLEND):
#import renderer
sizedChunks = renderer.chunkMarkers(self._selectedChunks)
for size, chunks in sizedChunks.iteritems():
if not len(chunks):
continue
chunks = numpy.array(chunks, dtype='float32')
chunkPosition = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
chunkPosition[..., (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
chunkPosition[..., (0, 2)] *= size
chunkPosition[..., (0, 2)] += chunks[:, newaxis, :]
chunkPosition *= 16
chunkPosition[..., 1] = self.editor.level.Height
GL.glVertexPointer(3, GL.GL_FLOAT, 0, chunkPosition.ravel())
# chunkPosition *= 8
GL.glDrawArrays(GL.GL_QUADS, 0, len(chunkPosition) * 4)
for d, points, positions in lines:
if 0 == len(positions):
continue
vertexArray = numpy.zeros((len(positions), 4, 3), dtype='float32')
vertexArray[..., [0, 2]] = positions
vertexArray.shape = len(positions), 2, 2, 3
vertexArray[..., 0, 0, 0] += points[0]
vertexArray[..., 0, 0, 2] += points[1]
vertexArray[..., 0, 1, 0] += points[2]
vertexArray[..., 0, 1, 2] += points[3]
vertexArray[..., 1, 0, 0] += points[2]
vertexArray[..., 1, 0, 2] += points[3]
vertexArray[..., 1, 1, 0] += points[0]
vertexArray[..., 1, 1, 2] += points[1]
vertexArray *= 16
vertexArray[..., 1, :, 1] = self.editor.level.Height
GL.glVertexPointer(3, GL.GL_FLOAT, 0, vertexArray)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glDrawArrays(GL.GL_QUADS, 0, len(positions) * 4)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
with gl.glEnable(GL.GL_BLEND, GL.GL_DEPTH_TEST):
GL.glDepthMask(False)
GL.glDrawArrays(GL.GL_QUADS, 0, len(positions) * 4)
GL.glDepthMask(True)
@property
def worldTooltipText(self):
box = self.editor.selectionTool.selectionBoxInProgress()
if box:
box = box.chunkBox(self.editor.level)
l, w = box.length // 16, box.width // 16
return _("%s x %s chunks") % (l, w)
else:
if config.settings.viewMode.get() == "Chunk":
point, face = self.editor.chunkViewport.blockFaceUnderCursor
else:
point, face = self.editor.mainViewport.blockFaceUnderCursor
if point:
return "Chunk ({}, {})".format(point[0] // 16, point[2] // 16)
def toolSelected(self):
self.editor.selectionToChunks()
self.panel = ChunkToolPanel(self)
self.panel.centery = self.editor.centery
self.panel.left = 10
self.editor.add(self.panel)
def toolDeselected(self):
self.editor.chunksToSelection()
def cancel(self):
self.editor.remove(self.panel)
def selectedChunks(self):
return self.editor.selectedChunks
@alertException
def destroyChunks(self, chunks=None):
if "No" == ask("Really delete these chunks? This cannot be undone.", ("Yes", "No")):
return
if chunks is None:
chunks = self.selectedChunks()
chunks = list(chunks)
def _destroyChunks():
i = 0
chunkCount = len(chunks)
for cx, cz in chunks:
i += 1
yield (i, chunkCount)
if self.editor.level.containsChunk(cx, cz):
try:
self.editor.level.deleteChunk(cx, cz)
except Exception as e:
print "Error during chunk delete: ", e
with setWindowCaption("DELETING - "):
showProgress("Deleting chunks...", _destroyChunks())
self.editor.renderer.invalidateChunkMarkers()
self.editor.renderer.discardAllChunks()
# self.editor.addUnsavedEdit()
@alertException
def pruneChunks(self):
if "No" == ask("Save these chunks and remove the rest? This cannot be undone.", ("Yes", "No")):
return
self.editor.saveFile()
def _pruneChunks():
maxChunks = self.editor.level.chunkCount
selectedChunks = self.selectedChunks()
for i, cPos in enumerate(list(self.editor.level.allChunks)):
if cPos not in selectedChunks:
try:
self.editor.level.deleteChunk(*cPos)
except Exception as e:
print "Error during chunk delete: ", e
yield i, maxChunks
with setWindowCaption("PRUNING - "):
showProgress("Pruning chunks...", _pruneChunks())
self.editor.renderer.invalidateChunkMarkers()
self.editor.discardAllChunks()
# self.editor.addUnsavedEdit()
@alertException
def relightChunks(self):
def _relightChunks():
for i in self.editor.level.generateLightsIter(self.selectedChunks()):
yield i
with setWindowCaption("RELIGHTING - "):
showProgress(_("Lighting {0} chunks...").format(len(self.selectedChunks())),
_relightChunks(), cancel=True)
self.editor.invalidateChunks(self.selectedChunks())
self.editor.addUnsavedEdit()
@alertException
def createChunks(self):
panel = GeneratorPanel()
col = [panel]
label = Label("Create chunks using the settings above? This cannot be undone.")
col.append(Row([Label("")]))
col.append(label)
col = Column(col)
if Dialog(client=col, responses=["OK", "Cancel"]).present() == "Cancel":
return
chunks = self.selectedChunks()
createChunks = panel.generate(self.editor.level, chunks)
try:
with setWindowCaption("CREATING - "):
showProgress("Creating {0} chunks...".format(len(chunks)), createChunks, cancel=True)
except Exception as e:
traceback.print_exc()
alert(_("Failed to start the chunk generator. {0!r}").format(e))
finally:
self.editor.renderer.invalidateChunkMarkers()
self.editor.renderer.loadNearbyChunks()
@alertException
def repopChunks(self):
for cpos in self.selectedChunks():
try:
chunk = self.editor.level.getChunk(*cpos)
chunk.TerrainPopulated = False
except pymclevel.ChunkNotPresent:
continue
self.editor.renderer.invalidateChunks(self.selectedChunks(), layers=["TerrainPopulated"])
@alertException
def dontRepopChunks(self):
for cpos in self.selectedChunks():
try:
chunk = self.editor.level.getChunk(*cpos)
chunk.TerrainPopulated = True
except pymclevel.ChunkNotPresent:
continue
self.editor.renderer.invalidateChunks(self.selectedChunks(), layers=["TerrainPopulated"])
def mouseDown(self, *args):
return self.editor.selectionTool.mouseDown(*args)
def mouseUp(self, evt, *args):
self.editor.selectionTool.mouseUp(evt, *args)
def keyDown(self, evt):
self.editor.selectionTool.keyDown(evt)
def keyUp(self, evt):
self.editor.selectionTool.keyUp(evt)
def GeneratorPanel():
panel = Widget()
panel.chunkHeight = 64
panel.grass = True
panel.simulate = False
panel.snapshot = False
jarStorage = MCServerChunkGenerator.getDefaultJarStorage()
if jarStorage:
jarStorage.reloadVersions()
generatorChoice = ChoiceButton(["Minecraft Server", "Flatland"])
panel.generatorChoice = generatorChoice
col = [Row((Label("Generator:"), generatorChoice))]
noVersionsRow = Label("Will automatically download and use the latest version")
versionContainer = Widget()
heightinput = IntInputRow("Height: ", ref=AttrRef(panel, "chunkHeight"), min=0, max=255)
grassinput = CheckBoxLabel("Grass", ref=AttrRef(panel, "grass"))
flatPanel = Column([heightinput, grassinput], align="l")
def generatorChoiceChanged():
serverPanel.visible = generatorChoice.selectedChoice == "Minecraft Server"
flatPanel.visible = not serverPanel.visible
generatorChoice.choose = generatorChoiceChanged
versionChoice = None
if len(jarStorage.versions):
def checkForUpdates():
def _check():
yield
jarStorage.downloadCurrentServer(panel.snapshot)
yield
showProgress("Checking for server updates...", _check())
versionChoice.choices = sorted(jarStorage.versions, reverse=True)
versionChoice.choiceIndex = 0
versionChoice = ChoiceButton(sorted(jarStorage.versions, reverse=True))
versionChoice.set_size_for_text(200)
versionChoiceRow = (Row((
Label("Server version:"),
versionChoice,
Label("or"),
Button("Check for Updates", action=checkForUpdates))))
panel.versionChoice = versionChoice
versionContainer.add(versionChoiceRow)
else:
versionContainer.add(noVersionsRow)
versionContainer.shrink_wrap()
menu = Menu("Advanced", [
("Open Server Storage", "revealStorage"),
("Reveal World Cache", "revealCache"),
("Delete World Cache", "clearCache")
])
def presentMenu():
i = menu.present(advancedButton.parent, advancedButton.topleft)
if i != -1:
(revealStorage, revealCache, clearCache)[i]()
advancedButton = Button("Advanced...", presentMenu)
@alertException
def revealStorage():
mcplatform.platform_open(jarStorage._cacheDir)
@alertException
def revealCache():
mcplatform.platform_open(MCServerChunkGenerator.worldCacheDir)
# revealCacheRow = Row((Label("Minecraft Server Storage: "), Button("Open Folder", action=revealCache, tooltipText="Click me to install your own minecraft_server.jar if you have any.")))
@alertException
def clearCache():
MCServerChunkGenerator.clearWorldCache()
simRow = CheckBoxLabel("Simulate world", ref=AttrRef(panel, "simulate"),
tooltipText="Simulate the world for a few seconds after generating it. Reduces the save file size by processing all of the TileTicks.")
useSnapshotServer = CheckBoxLabel("Use snapshot versions", ref=AttrRef(panel, "snapshot"),
tooltipText="Uses the Latest Snapshot Terrain Generation")
simRow = Row((simRow, advancedButton), anchor="lrh")
#deleteCacheRow = Row((Label("Delete Temporary World File Cache?"), Button("Delete Cache!", action=clearCache, tooltipText="Click me if you think your chunks are stale.")))
serverPanel = Column([useSnapshotServer, versionContainer, simRow], align="l")
col.append(serverPanel)
col = Column(col, align="l")
col.add(flatPanel)
flatPanel.topleft = serverPanel.topleft
flatPanel.visible = False
panel.add(col)
panel.shrink_wrap()
def generate(level, arg, useWorldType="DEFAULT"):
useServer = generatorChoice.selectedChoice == "Minecraft Server"
if useServer:
def _createChunks():
if versionChoice:
version = versionChoice.selectedChoice
else:
version = None
gen = MCServerChunkGenerator(version=version)
if isinstance(arg, pymclevel.BoundingBox):
for i in gen.createLevelIter(level, arg, simulate=panel.simulate, worldType=useWorldType):
yield i
else:
for i in gen.generateChunksInLevelIter(level, arg, simulate=panel.simulate):
yield i
else:
def _createChunks():
height = panel.chunkHeight
grass = panel.grass and pymclevel.alphaMaterials.Grass.ID or pymclevel.alphaMaterials.Dirt.ID
if isinstance(arg, pymclevel.BoundingBox):
chunks = list(arg.chunkPositions)
else:
chunks = arg
if level.dimNo in (-1, 1):
maxskylight = 0
else:
maxskylight = 15
for i, (cx, cz) in enumerate(chunks):
yield i, len(chunks)
#surface = blockInput.blockInfo
#for cx, cz in :
try:
level.createChunk(cx, cz)
except ValueError as e: # chunk already present
print e
continue
else:
ch = level.getChunk(cx, cz)
if height > 0:
stoneHeight = max(0, height - 5)
grassHeight = max(0, height - 1)
ch.Blocks[:, :, grassHeight] = grass
ch.Blocks[:, :, stoneHeight:grassHeight] = pymclevel.alphaMaterials.Dirt.ID
ch.Blocks[:, :, :stoneHeight] = pymclevel.alphaMaterials.Stone.ID
ch.Blocks[:, :, 0] = pymclevel.alphaMaterials.Bedrock.ID
ch.SkyLight[:, :, height:] = maxskylight
if maxskylight:
ch.HeightMap[:] = height
else:
ch.SkyLight[:] = maxskylight
ch.needsLighting = False
ch.dirty = True
return _createChunks()
panel.generate = generate
panel.kill_process = MCServerChunkGenerator.terminateProcesses
return panel
|
|
from common_fixtures import * # NOQA
from test_services_sidekick \
import create_env_with_sidekick, validate_sidekick, validate_dns
logger = logging.getLogger(__name__)
def create_environment_with_services(
        super_client, client, service_scale, consumed_service_scale, port,
        ssh_port="22", isnetworkModeHost_svc=False,
        isnetworkModeHost_consumed_svc=False):
    """Create an environment with a service and a consumed service
    (optionally in host network mode), activate both and wait until
    they reach the "active" state."""
if not isnetworkModeHost_svc and not isnetworkModeHost_consumed_svc:
env, service, consumed_service = create_env_with_2_svc(
client, service_scale, consumed_service_scale, port)
else:
env, service, consumed_service = create_env_with_2_svc_hostnetwork(
client, service_scale, consumed_service_scale, port, ssh_port,
isnetworkModeHost_svc, isnetworkModeHost_consumed_svc)
service.activate()
consumed_service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
assert consumed_service.state == "active"
return env, service, consumed_service
def test_dns_discovery_activate_svc_activate_consumed_svc_link(
super_client, client):
port = "401"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_service_scale_up(super_client, client):
port = "402"
service_scale = 1
consumed_service_scale = 2
final_service_scale = 3
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
assert service.scale == final_service_scale
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_services_scale_down(super_client, client):
port = "403"
service_scale = 3
consumed_service_scale = 2
final_service_scale = 1
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
service = client.update(service, scale=final_service_scale,
name=service.name)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
assert service.scale == final_service_scale
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_scale_up(super_client, client):
port = "404"
service_scale = 1
consumed_service_scale = 2
final_consumed_service_scale = 4
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
consumed_service = client.update(consumed_service,
scale=final_consumed_service_scale,
name=consumed_service.name)
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "active"
assert consumed_service.scale == final_consumed_service_scale
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_scale_down(super_client, client):
port = "405"
service_scale = 2
consumed_service_scale = 3
final_consumed_service_scale = 1
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
consumed_service = client.update(consumed_service,
scale=final_consumed_service_scale,
name=consumed_service.name)
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "active"
assert consumed_service.scale == final_consumed_service_scale
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_stop_start_instance(
super_client, client):
port = "406"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
container_name = env.name + "_" + consumed_service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Stop instance
container = client.wait_success(container.stop(), SERVICE_WAIT_TIMEOUT)
service = client.wait_success(service)
wait_for_scale_to_adjust(super_client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_restart_instance(
super_client, client):
port = "407"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
container_name = env.name + "_" + consumed_service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Restart instance
container = client.wait_success(container.restart(), SERVICE_WAIT_TIMEOUT)
assert container.state == 'running'
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_delete_instance(super_client, client):
port = "408"
service_scale = 1
consumed_service_scale = 3
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
container_name = env.name + "_" + consumed_service.name + "_1"
containers = client.list_container(name=container_name)
assert len(containers) == 1
container = containers[0]
# Delete instance
container = client.wait_success(client.delete(container))
assert container.state == 'removed'
wait_for_scale_to_adjust(super_client, consumed_service)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_consumed_services_deactivate_activate(
super_client, client):
port = "409"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
consumed_service = consumed_service.deactivate()
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "inactive"
wait_until_instances_get_stopped(super_client, consumed_service)
consumed_service = consumed_service.activate()
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_service_deactivate_activate(super_client, client):
port = "410"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
service = service.deactivate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "inactive"
wait_until_instances_get_stopped(super_client, service)
service = service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_deactivate_activate_environment(super_client, client):
port = "411"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
)
env = env.deactivateservices()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "inactive"
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "inactive"
wait_until_instances_get_stopped(super_client, consumed_service)
env = env.activateservices()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
consumed_service = client.wait_success(
consumed_service, SERVICE_WAIT_TIMEOUT)
assert consumed_service.state == "active"
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_services_stop_start_instance(super_client, client):
port = "416"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port,
)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Stop service instance
service_instance = client.wait_success(
service_instance.stop(), SERVICE_WAIT_TIMEOUT)
service = client.wait_success(service)
wait_for_scale_to_adjust(super_client, service)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discovery_services_restart_instance(super_client, client):
port = "417"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
    # Restart service instance
service_instance = client.wait_success(
service_instance.restart(), SERVICE_WAIT_TIMEOUT)
assert service_instance.state == 'running'
validate_linked_service(super_client, service, [consumed_service], port,
)
delete_all(client, [env])
def test_dns_discovery_services_delete_instance(super_client, client):
port = "418"
service_scale = 2
consumed_service_scale = 2
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port)
validate_linked_service(super_client, service, [consumed_service], port)
container_name = env.name + "_" + service.name + "_2"
containers = client.list_container(name=container_name)
assert len(containers) == 1
service_instance = containers[0]
# Delete instance
container = client.wait_success(client.delete(service_instance))
assert container.state == 'removed'
wait_for_scale_to_adjust(super_client, service)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discoverys_with_hostnetwork_1(super_client, client):
    # Verify that containers of a service in the managed network can resolve
    # containers of another service running in host network mode.
port = "419"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=False,
isnetworkModeHost_consumed_svc=True)
validate_linked_service(super_client, service, [consumed_service], port)
delete_all(client, [env])
def test_dns_discoverys_with_hostnetwork_2(super_client, client):
    # Verify that containers of a service in host network mode can resolve
    # containers of another service in host network mode within the same
    # stack.
port = "420"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=True,
isnetworkModeHost_consumed_svc=True)
validate_linked_service(
super_client, service, [consumed_service], ssh_port)
delete_all(client, [env])
def test_dns_discoverys_with_hostnetwork_3(super_client, client):
    # Verify that containers of a service in host network mode can resolve
    # containers of another service in the managed network.
port = "421"
service_scale = 1
consumed_service_scale = 2
ssh_port = "33"
env, service, consumed_service = create_environment_with_services(
super_client, client, service_scale, consumed_service_scale, port,
ssh_port, isnetworkModeHost_svc=True,
isnetworkModeHost_consumed_svc=False)
validate_linked_service(
super_client, service, [consumed_service], ssh_port)
delete_all(client, [env])
def test_dns_discoverys_with_hostnetwork_externalService(super_client, client):
    # Verify that containers of a service in host network mode can resolve
    # an external service.
port = "422"
env, service, ext_service, con_list = \
create_env_with_ext_svc(client, 1, port)
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID_HOSTNET,
"networkMode": "host",
"labels": dns_labels}
random_name = random_str()
service_name = random_name.replace("-", "")
host_service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=1)
host_service = client.wait_success(host_service)
host_service.activate()
ext_service.activate()
host_service = client.wait_success(host_service, SERVICE_WAIT_TIMEOUT)
ext_service = client.wait_success(ext_service, SERVICE_WAIT_TIMEOUT)
assert host_service.state == "active"
assert ext_service.state == "active"
validate_external_service(
super_client, host_service, [ext_service], 33, con_list)
con_list.append(env)
delete_all(client, con_list)
def test_dns_discoverys_with_hostnetwork_externalService_cname(
super_client, client):
    # Verify that containers of a service in host network mode can resolve
    # an external service referenced by hostname (CNAME).
port = "423"
env, service, ext_service, con_list = \
create_env_with_ext_svc(client, 1, port, True)
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID_HOSTNET,
"networkMode": "host",
"labels": dns_labels}
random_name = random_str()
service_name = random_name.replace("-", "")
host_service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=1)
host_service = client.wait_success(host_service)
host_service.activate()
ext_service.activate()
host_service = client.wait_success(host_service, SERVICE_WAIT_TIMEOUT)
ext_service = client.wait_success(ext_service, SERVICE_WAIT_TIMEOUT)
assert host_service.state == "active"
assert ext_service.state == "active"
validate_external_service_for_hostname(super_client, host_service,
[ext_service], 33)
delete_all(client, [env])
def test_dns_discoverys_coss_stack_service(
super_client, client):
env = create_env(client)
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
service_name = "test1"
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
port = "424"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
env1 = create_env(client)
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env1.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
validate_linked_service(super_client, service1, [service],
port,
linkName=service.name+"."+env.name)
linkName = service.name+"."+env.name+"."+RANCHER_FQDN
validate_linked_service(super_client, service1, [service],
port,
linkName=linkName)
delete_all(client, [env, env1])
def test_dns_discoverys_coss_stack_service_uppercase(
super_client, client):
env = create_env(client)
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
service_name = "TEST"
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
port = "425"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
env1 = create_env(client)
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env1.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
validate_linked_service(super_client, service1, [service],
port,
linkName=service.name+"."+env.name)
linkName = service.name+"."+env.name+"."+RANCHER_FQDN
validate_linked_service(super_client, service1, [service],
port,
linkName=linkName)
delete_all(client, [env, env1])
def test_dns_discoverys_for_containers_by_name_and_fqdn(
super_client, client):
env = create_env(client)
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
service_name = "TEST"
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
port = "426"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
containers = get_service_container_list(super_client, service)
assert len(containers) == service.scale
for container in containers:
validate_for_container_dns_resolution(
super_client, service1, port, container, container.name)
validate_for_container_dns_resolution(
super_client, service1, port, container,
container.name+"."+RANCHER_FQDN)
delete_all(client, [env])
def test_dns_discoverys_for_containers_by_name_and_fqdn_cross_stack(
super_client, client):
env = create_env(client)
launch_config_svc = {"imageUuid": WEB_IMAGE_UUID}
service_name = "TEST"
service = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
service.activate()
service = client.wait_success(service, SERVICE_WAIT_TIMEOUT)
assert service.state == "active"
# Deploy client service
port = "427"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
env1 = create_env(client)
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env1.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
containers = get_service_container_list(super_client, service)
assert len(containers) == service.scale
for container in containers:
validate_for_container_dns_resolution(
super_client, service1, port, container, container.name)
validate_for_container_dns_resolution(
super_client, service1, port, container,
container.name+"."+RANCHER_FQDN)
delete_all(client, [env, env1])
def test_dns_discovery_for_sidekick_containers_by_name_and_fqdn_cross_stack(
super_client, client):
port = "428"
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick(client, service_scale, port)
env = env.activateservices()
env = client.wait_success(env, 120)
assert env.state == "active"
service = client.wait_success(service, 120)
assert service.state == "active"
validate_sidekick(super_client, service, service_name,
consumed_service_name, port)
secondary_cons = get_service_containers_with_name(
super_client, service, consumed_service_name)
assert len(secondary_cons) == service.scale
# Deploy client service in another environment
port = "429"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
env1 = create_env(client)
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env1.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
for container in secondary_cons:
validate_for_container_dns_resolution(
super_client, service1, port, container, container.name)
validate_for_container_dns_resolution(
super_client, service1, port, container,
container.name+"."+RANCHER_FQDN)
delete_all(client, [env, env1])
def test_dns_discovery_for_service_with_sidekick(super_client, client):
port = "430"
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick(client, service_scale, port)
env = env.activateservices()
env = client.wait_success(env, 120)
assert env.state == "active"
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, port, dnsname)
secondary_cons = get_service_containers_with_name(
super_client, service, consumed_service_name)
# Deploy client service in same environment
port = "431"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
service_name = random_str()
service1 = client.create_service(name=service_name,
environmentId=env.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
client_containers = get_service_container_list(super_client, service1)
time.sleep(5)
dnsname = service.secondaryLaunchConfigs[0].name + "." + service.name
validate_dns(
super_client, client_containers, secondary_cons, port, dnsname)
delete_all(client, [env])
def test_dns_discovery_for_service_with_sidekick_cross_stack(
super_client, client):
port = "432"
service_scale = 2
env, service, service_name, consumed_service_name = \
create_env_with_sidekick(client, service_scale, port)
env = env.activateservices()
env = client.wait_success(env, 120)
assert env.state == "active"
service = client.wait_success(service, 120)
assert service.state == "active"
dnsname = service.secondaryLaunchConfigs[0].name
validate_sidekick(super_client, service, service_name,
consumed_service_name, port, dnsname)
secondary_cons = get_service_containers_with_name(
super_client, service, consumed_service_name)
# Deploy client service in a different environment
port = "433"
launch_config_svc = {"imageUuid": SSH_IMAGE_UUID, }
launch_config_svc["ports"] = [port+":"+"22/tcp"]
service_name = random_str()
env1 = create_env(client)
service1 = client.create_service(name=service_name,
environmentId=env1.id,
launchConfig=launch_config_svc,
scale=2)
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
service1.activate()
service1 = client.wait_success(service1, SERVICE_WAIT_TIMEOUT)
assert service1.state == "active"
client_containers = get_service_container_list(super_client, service1)
time.sleep(5)
dnsname = \
service.secondaryLaunchConfigs[0].name + "." + service.name + \
"." + env.name + "." + RANCHER_FQDN
validate_dns(
super_client, client_containers, secondary_cons, port, dnsname)
delete_all(client, [env, env1])
def validate_for_container_dns_resolution(
        super_client, service, sshport, container, dns_name):
    """From every container of `service`, verify that `dns_name` resolves to
    `container`: the HTTP name check must return the container id and dig
    must return its primary IP address."""
client_containers = get_service_container_list(super_client, service)
assert len(client_containers) == service.scale
for con in client_containers:
host = super_client.by_id('host', con.hosts[0].id)
# Validate port mapping
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host.ipAddresses()[0].address, username="root",
password="root", port=int(sshport))
# Validate container name resolution
cmd = "wget -O result.txt --timeout=20 --tries=1 http://" + \
dns_name + ":80/name.html;cat result.txt"
logger.info(cmd)
print cmd
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
assert len(response) == 1
resp = response[0].strip("\n")
logger.info(resp)
print resp
assert resp in (container.externalId[:12])
# Validate DNS resolution using dig
cmd = "dig " + dns_name + " +short"
logger.info(cmd)
print cmd
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
logger.info("Actual dig Response" + str(response))
assert len(response) == 1
resp = response[0].strip("\n")
logger.info(resp)
print resp
assert resp == container.primaryIpAddress
return
|
|
##===-- lldbutil.py ------------------------------------------*- Python -*-===##
##
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
##
##===----------------------------------------------------------------------===##
"""
This LLDB module contains miscellaneous utilities.
Some of the test suite takes advantage of the utility functions defined here.
They can also be useful for general purpose lldb scripting.
"""
from __future__ import print_function
import lldb
import os
import sys
import io
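# The helpers below can also be driven from a standalone script that imports
# the lldb Python module.  A minimal, illustrative sketch (the import path and
# the program name are assumptions, not part of this file):
#
#   import lldb
#   import lldbutil
#   print(lldbutil.which("lldb"))                          # path or None
#   print(lldbutil.state_type_to_str(lldb.eStateStopped))  # -> "stopped"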
# ===================================================
# Utilities for locating/checking executable programs
# ===================================================
def is_exe(fpath):
"""Returns True if fpath is an executable."""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
"""Returns the full path to a program; None otherwise."""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# ===================================================
# Disassembly for an SBFunction or an SBSymbol object
# ===================================================
def disassemble(target, function_or_symbol):
"""Disassemble the function or symbol given a target.
It returns the disassembly content in a string object.
"""
buf = io.StringIO()
insts = function_or_symbol.GetInstructions(target)
for i in insts:
print(i, file=buf)
return buf.getvalue()
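# Usage sketch for disassemble() (hedged; assumes `target` is a valid
# lldb.SBTarget and `frame` is a stopped lldb.SBFrame obtained elsewhere):
#
#   print(disassemble(target, frame.GetFunction()))
#   # or, for a symbol:
#   print(disassemble(target, frame.GetSymbol()))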
# ==========================================================
# Integer (byte size 1, 2, 4, and 8) to bytearray conversion
# ==========================================================
def int_to_bytearray(val, bytesize):
"""Utility function to convert an integer into a bytearray.
    It returns the bytearray in the little endian format. To get the big
    endian format, just call ba.reverse() on the returned object.
"""
import struct
if bytesize == 1:
return bytearray([val])
# Little endian followed by a format character.
template = "<%c"
if bytesize == 2:
fmt = template % 'h'
elif bytesize == 4:
fmt = template % 'i'
    elif bytesize == 8:
fmt = template % 'q'
else:
return None
packed = struct.pack(fmt, val)
    return bytearray(packed)
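# A couple of concrete values, for reference (little endian):
#
#   int_to_bytearray(0x1234, 2)   # bytes 0x34, 0x12
#   ba = int_to_bytearray(1, 4)   # bytes 0x01, 0x00, 0x00, 0x00
#   ba.reverse()                  # now big endian: 0x00, 0x00, 0x00, 0x01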
def bytearray_to_int(bytes, bytesize):
"""Utility function to convert a bytearray into an integer.
It interprets the bytearray in the little endian format. For a big endian
bytearray, just do ba.reverse() on the object before passing it in.
"""
import struct
if bytesize == 1:
return bytes[0]
# Little endian followed by a format character.
template = "<%c"
if bytesize == 2:
fmt = template % 'h'
elif bytesize == 4:
fmt = template % 'i'
    elif bytesize == 8:
fmt = template % 'q'
else:
return None
    unpacked = struct.unpack(fmt, bytes)
return unpacked[0]
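# Round-trip sketch combining the two helpers above:
#
#   ba = int_to_bytearray(256, 4)   # bytes 0x00, 0x01, 0x00, 0x00
#   bytearray_to_int(ba, 4)         # -> 256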
# ==============================================================
# Get the description of an lldb object or None if not available
# ==============================================================
def get_description(obj, option=None):
"""Calls lldb_obj.GetDescription() and returns a string, or None.
For SBTarget, SBBreakpointLocation, and SBWatchpoint lldb objects, an extra
option can be passed in to describe the detailed level of description
desired:
o lldb.eDescriptionLevelBrief
o lldb.eDescriptionLevelFull
o lldb.eDescriptionLevelVerbose
"""
method = getattr(obj, 'GetDescription')
if not method:
return None
tuple = (lldb.SBTarget, lldb.SBBreakpointLocation, lldb.SBWatchpoint)
if isinstance(obj, tuple):
if option is None:
option = lldb.eDescriptionLevelBrief
stream = lldb.SBStream()
if option is None:
success = method(stream)
else:
success = method(stream, option)
if not success:
return None
return stream.GetData()
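# Usage sketch (hedged; assumes `target` is a valid lldb.SBTarget):
#
#   desc = get_description(target, option=lldb.eDescriptionLevelFull)
#   if desc:
#       print(desc)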
# =================================================
# Convert some enum value to its string counterpart
# =================================================
def state_type_to_str(enum):
"""Returns the stateType string given an enum."""
if enum == lldb.eStateInvalid:
return "invalid"
elif enum == lldb.eStateUnloaded:
return "unloaded"
elif enum == lldb.eStateConnected:
return "connected"
elif enum == lldb.eStateAttaching:
return "attaching"
elif enum == lldb.eStateLaunching:
return "launching"
elif enum == lldb.eStateStopped:
return "stopped"
elif enum == lldb.eStateRunning:
return "running"
elif enum == lldb.eStateStepping:
return "stepping"
elif enum == lldb.eStateCrashed:
return "crashed"
elif enum == lldb.eStateDetached:
return "detached"
elif enum == lldb.eStateExited:
return "exited"
elif enum == lldb.eStateSuspended:
return "suspended"
else:
raise Exception("Unknown StateType enum")
def stop_reason_to_str(enum):
"""Returns the stopReason string given an enum."""
if enum == lldb.eStopReasonInvalid:
return "invalid"
elif enum == lldb.eStopReasonNone:
return "none"
elif enum == lldb.eStopReasonTrace:
return "trace"
elif enum == lldb.eStopReasonBreakpoint:
return "breakpoint"
elif enum == lldb.eStopReasonWatchpoint:
return "watchpoint"
elif enum == lldb.eStopReasonSignal:
return "signal"
elif enum == lldb.eStopReasonException:
return "exception"
elif enum == lldb.eStopReasonPlanComplete:
return "plancomplete"
elif enum == lldb.eStopReasonThreadExiting:
return "threadexiting"
else:
raise Exception("Unknown StopReason enum")
def symbol_type_to_str(enum):
"""Returns the symbolType string given an enum."""
if enum == lldb.eSymbolTypeInvalid:
return "invalid"
elif enum == lldb.eSymbolTypeAbsolute:
return "absolute"
elif enum == lldb.eSymbolTypeCode:
return "code"
elif enum == lldb.eSymbolTypeData:
return "data"
elif enum == lldb.eSymbolTypeTrampoline:
return "trampoline"
elif enum == lldb.eSymbolTypeRuntime:
return "runtime"
elif enum == lldb.eSymbolTypeException:
return "exception"
elif enum == lldb.eSymbolTypeSourceFile:
return "sourcefile"
elif enum == lldb.eSymbolTypeHeaderFile:
return "headerfile"
elif enum == lldb.eSymbolTypeObjectFile:
return "objectfile"
elif enum == lldb.eSymbolTypeCommonBlock:
return "commonblock"
elif enum == lldb.eSymbolTypeBlock:
return "block"
elif enum == lldb.eSymbolTypeLocal:
return "local"
elif enum == lldb.eSymbolTypeParam:
return "param"
elif enum == lldb.eSymbolTypeVariable:
return "variable"
elif enum == lldb.eSymbolTypeVariableType:
return "variabletype"
elif enum == lldb.eSymbolTypeLineEntry:
return "lineentry"
elif enum == lldb.eSymbolTypeLineHeader:
return "lineheader"
elif enum == lldb.eSymbolTypeScopeBegin:
return "scopebegin"
elif enum == lldb.eSymbolTypeScopeEnd:
return "scopeend"
elif enum == lldb.eSymbolTypeAdditional:
return "additional"
elif enum == lldb.eSymbolTypeCompiler:
return "compiler"
elif enum == lldb.eSymbolTypeInstrumentation:
return "instrumentation"
elif enum == lldb.eSymbolTypeUndefined:
return "undefined"
def value_type_to_str(enum):
"""Returns the valueType string given an enum."""
if enum == lldb.eValueTypeInvalid:
return "invalid"
elif enum == lldb.eValueTypeVariableGlobal:
return "global_variable"
elif enum == lldb.eValueTypeVariableStatic:
return "static_variable"
elif enum == lldb.eValueTypeVariableArgument:
return "argument_variable"
elif enum == lldb.eValueTypeVariableLocal:
return "local_variable"
elif enum == lldb.eValueTypeRegister:
return "register"
elif enum == lldb.eValueTypeRegisterSet:
return "register_set"
elif enum == lldb.eValueTypeConstResult:
return "constant_result"
else:
raise Exception("Unknown ValueType enum")
# ==================================================
# Get stopped threads due to each stop reason.
# ==================================================
def sort_stopped_threads(process,
breakpoint_threads=None,
crashed_threads=None,
watchpoint_threads=None,
signal_threads=None,
exiting_threads=None,
other_threads=None):
""" Fills array *_threads with threads stopped for the corresponding stop
reason.
"""
for lst in [breakpoint_threads,
watchpoint_threads,
signal_threads,
exiting_threads,
other_threads]:
if lst is not None:
lst[:] = []
for thread in process:
dispatched = False
for (reason, list) in [(lldb.eStopReasonBreakpoint, breakpoint_threads),
(lldb.eStopReasonException, crashed_threads),
(lldb.eStopReasonWatchpoint, watchpoint_threads),
(lldb.eStopReasonSignal, signal_threads),
(lldb.eStopReasonThreadExiting, exiting_threads),
(None, other_threads)]:
if not dispatched and list is not None:
if thread.GetStopReason() == reason or reason is None:
list.append(thread)
dispatched = True
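# Usage sketch (hedged; assumes `process` is a stopped lldb.SBProcess):
#
#   bp_threads, other = [], []
#   sort_stopped_threads(process,
#                        breakpoint_threads=bp_threads,
#                        other_threads=other)
#   # bp_threads now holds every thread stopped at a breakpoint.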
# ==================================================
# Utility functions for setting breakpoints
# ==================================================
def run_break_set_by_file_and_line(
test,
file_name,
line_number,
extra_options=None,
num_expected_locations=1,
loc_exact=False,
module_name=None):
"""Set a breakpoint by file and line, returning the breakpoint number.
If extra_options is not None, then we append it to the breakpoint set command.
If num_expected_locations is -1 we check that we got AT LEAST one location, otherwise we check that num_expected_locations equals the number of locations.
If loc_exact is true, we check that there is one location, and that location must be at the input file and line number."""
if file_name is None:
command = 'breakpoint set -l %d' % (line_number)
else:
command = 'breakpoint set -f "%s" -l %d' % (file_name, line_number)
if module_name:
command += " --shlib '%s'" % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1 and loc_exact:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
file_name=file_name,
line_number=line_number,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
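# Typical call from a TestBase-derived test (hedged sketch; the file name and
# line number are placeholders):
#
#   bpno = run_break_set_by_file_and_line(
#       self, "main.c", 20, num_expected_locations=1, loc_exact=True)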
def run_break_set_by_symbol(
test,
symbol,
extra_options=None,
num_expected_locations=-1,
sym_exact=False,
module_name=None):
"""Set a breakpoint by symbol name. Common options are the same as run_break_set_by_file_and_line.
If sym_exact is true, then the output symbol must match the input exactly, otherwise we do a substring match."""
command = 'breakpoint set -n "%s"' % (symbol)
if module_name:
command += " --shlib '%s'" % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1 and sym_exact:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
symbol_name=symbol,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_selector(
test,
selector,
extra_options=None,
num_expected_locations=-1,
module_name=None):
"""Set a breakpoint by selector. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -S "%s"' % (selector)
if module_name:
command += ' --shlib "%s"' % (module_name)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
if num_expected_locations == 1:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations,
symbol_name=selector,
symbol_match_exact=False,
module_name=module_name)
else:
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_regexp(
test,
regexp,
extra_options=None,
num_expected_locations=-1):
"""Set a breakpoint by regular expression match on symbol name. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -r "%s"' % (regexp)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_by_source_regexp(
test,
regexp,
extra_options=None,
num_expected_locations=-1):
"""Set a breakpoint by source regular expression. Common options are the same as run_break_set_by_file_and_line."""
command = 'breakpoint set -p "%s"' % (regexp)
if extra_options:
command += " " + extra_options
break_results = run_break_set_command(test, command)
check_breakpoint_result(
test,
break_results,
num_locations=num_expected_locations)
return get_bpno_from_match(break_results)
def run_break_set_command(test, command):
"""Run the command passed in - it must be some break set variant - and analyze the result.
Returns a dictionary of information gleaned from the command-line results.
Will assert if the breakpoint setting fails altogether.
Dictionary will contain:
    bpno - breakpoint number of the newly created breakpoint, -1 on error.
num_locations - number of locations set for the breakpoint.
If there is only one location, the dictionary MAY contain:
file - source file name
line_no - source line number
symbol - symbol name
inline_symbol - inlined symbol name
offset - offset from the original symbol
module - module
address - address at which the breakpoint was set."""
patterns = [
r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>[0-9]+) locations\.$",
r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>no) locations \(pending\)\.",
r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>[+\-]{0,1}[^+]+)( \+ (?P<offset>[0-9]+)){0,1}( \[inlined\] (?P<inline_symbol>.*)){0,1} at (?P<file>[^:]+):(?P<line_no>[0-9]+), address = (?P<address>0x[0-9a-fA-F]+)$",
r"^Breakpoint (?P<bpno>[0-9]+): where = (?P<module>.*)`(?P<symbol>.*)( \+ (?P<offset>[0-9]+)){0,1}, address = (?P<address>0x[0-9a-fA-F]+)$"]
match_object = test.match(command, patterns)
break_results = match_object.groupdict()
# We always insert the breakpoint number, setting it to -1 if we couldn't find it
# Also, make sure it gets stored as an integer.
if not 'bpno' in break_results:
break_results['bpno'] = -1
else:
break_results['bpno'] = int(break_results['bpno'])
# We always insert the number of locations
# If ONE location is set for the breakpoint, then the output doesn't mention locations, but it has to be 1...
# We also make sure it is an integer.
if not 'num_locations' in break_results:
num_locations = 1
else:
num_locations = break_results['num_locations']
if num_locations == 'no':
num_locations = 0
else:
num_locations = int(break_results['num_locations'])
break_results['num_locations'] = num_locations
if 'line_no' in break_results:
break_results['line_no'] = int(break_results['line_no'])
return break_results
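# --- Illustrative sketch (not part of the original lldbutil helpers) ---------
# Shows how the first pattern above parses a typical "breakpoint set" output
# line; the sample text is hypothetical and only the standard `re` module is
# used, so this runs outside of a debug session.
def _example_parse_break_output():
    import re
    sample = "Breakpoint 1: 3 locations."
    pattern = r"^Breakpoint (?P<bpno>[0-9]+): (?P<num_locations>[0-9]+) locations\.$"
    groups = re.match(pattern, sample).groupdict()
    # Mirrors the integer conversion done in run_break_set_command().
    return {key: int(value) for key, value in groups.items()}  # {'bpno': 1, 'num_locations': 3}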
def get_bpno_from_match(break_results):
return int(break_results['bpno'])
def check_breakpoint_result(
test,
break_results,
file_name=None,
line_number=-1,
symbol_name=None,
symbol_match_exact=True,
module_name=None,
offset=-1,
num_locations=-1):
out_num_locations = break_results['num_locations']
if num_locations == -1:
test.assertTrue(out_num_locations > 0,
"Expecting one or more locations, got none.")
else:
test.assertTrue(
num_locations == out_num_locations,
"Expecting %d locations, got %d." %
(num_locations,
out_num_locations))
if file_name:
out_file_name = ""
if 'file' in break_results:
out_file_name = break_results['file']
test.assertTrue(
file_name == out_file_name,
"Breakpoint file name '%s' doesn't match resultant name '%s'." %
(file_name,
out_file_name))
if line_number != -1:
        out_line_number = -1
        if 'line_no' in break_results:
            out_line_number = break_results['line_no']
test.assertTrue(
line_number == out_line_number,
"Breakpoint line number %s doesn't match resultant line %s." %
(line_number,
out_line_number))
if symbol_name:
out_symbol_name = ""
# Look first for the inlined symbol name, otherwise use the symbol
# name:
if 'inline_symbol' in break_results and break_results['inline_symbol']:
out_symbol_name = break_results['inline_symbol']
elif 'symbol' in break_results:
out_symbol_name = break_results['symbol']
if symbol_match_exact:
test.assertTrue(
symbol_name == out_symbol_name,
"Symbol name '%s' doesn't match resultant symbol '%s'." %
(symbol_name,
out_symbol_name))
else:
            test.assertTrue(
                out_symbol_name.find(symbol_name) != -1,
                "Symbol name '%s' isn't in resultant symbol '%s'." %
                (symbol_name, out_symbol_name))
    if module_name:
        out_module_name = None
        if 'module' in break_results:
            out_module_name = break_results['module']
        test.assertTrue(
            module_name.find(out_module_name) != -1,
            "Symbol module name '%s' isn't in expected module name '%s'." %
            (out_module_name, module_name))
# ==================================================
# Utility functions related to Threads and Processes
# ==================================================
def get_stopped_threads(process, reason):
"""Returns the thread(s) with the specified stop reason in a list.
The list can be empty if no such thread exists.
"""
threads = []
for t in process:
if t.GetStopReason() == reason:
threads.append(t)
return threads
def get_stopped_thread(process, reason):
"""A convenience function which returns the first thread with the given stop
reason or None.
Example usages:
1. Get the stopped thread due to a breakpoint condition
...
from lldbutil import get_stopped_thread
thread = get_stopped_thread(process, lldb.eStopReasonPlanComplete)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint condition")
...
2. Get the thread stopped due to a breakpoint
...
from lldbutil import get_stopped_thread
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), "There should be a thread stopped due to breakpoint")
...
"""
threads = get_stopped_threads(process, reason)
if len(threads) == 0:
return None
return threads[0]
def get_threads_stopped_at_breakpoint(process, bkpt):
""" For a stopped process returns the thread stopped at the breakpoint passed in bkpt"""
stopped_threads = []
threads = []
stopped_threads = get_stopped_threads(process, lldb.eStopReasonBreakpoint)
if len(stopped_threads) == 0:
return threads
for thread in stopped_threads:
# Make sure we've hit our breakpoint...
break_id = thread.GetStopReasonDataAtIndex(0)
if break_id == bkpt.GetID():
threads.append(thread)
return threads
def continue_to_breakpoint(process, bkpt):
""" Continues the process, if it stops, returns the threads stopped at bkpt; otherwise, returns None"""
process.Continue()
if process.GetState() != lldb.eStateStopped:
return None
else:
return get_threads_stopped_at_breakpoint(process, bkpt)
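# Illustrative usage sketch (requires an active lldb debug session; `test`,
# `process` and `bkpt` are assumed to come from the surrounding test case):
#
#   threads = continue_to_breakpoint(process, bkpt)
#   test.assertIsNotNone(threads, "Process did not stop")
#   test.assertEqual(len(threads), 1, "Expected exactly one stopped thread")
#   frame = threads[0].GetFrameAtIndex(0)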
def get_caller_symbol(thread):
"""
Returns the symbol name for the call site of the leaf function.
"""
depth = thread.GetNumFrames()
if depth <= 1:
return None
caller = thread.GetFrameAtIndex(1).GetSymbol()
if caller:
return caller.GetName()
else:
return None
def get_function_names(thread):
"""
Returns a sequence of function names from the stack frames of this thread.
"""
def GetFuncName(i):
return thread.GetFrameAtIndex(i).GetFunctionName()
return [GetFuncName(i) for i in range(thread.GetNumFrames())]
def get_symbol_names(thread):
"""
Returns a sequence of symbols for this thread.
"""
def GetSymbol(i):
return thread.GetFrameAtIndex(i).GetSymbol().GetName()
return [GetSymbol(i) for i in range(thread.GetNumFrames())]
def get_pc_addresses(thread):
"""
Returns a sequence of pc addresses for this thread.
"""
def GetPCAddress(i):
return thread.GetFrameAtIndex(i).GetPCAddress()
return [GetPCAddress(i) for i in range(thread.GetNumFrames())]
def get_filenames(thread):
"""
Returns a sequence of file names from the stack frames of this thread.
"""
def GetFilename(i):
return thread.GetFrameAtIndex(
i).GetLineEntry().GetFileSpec().GetFilename()
return [GetFilename(i) for i in range(thread.GetNumFrames())]
def get_line_numbers(thread):
"""
Returns a sequence of line numbers from the stack frames of this thread.
"""
def GetLineNumber(i):
return thread.GetFrameAtIndex(i).GetLineEntry().GetLine()
return [GetLineNumber(i) for i in range(thread.GetNumFrames())]
def get_module_names(thread):
"""
Returns a sequence of module names from the stack frames of this thread.
"""
def GetModuleName(i):
return thread.GetFrameAtIndex(
i).GetModule().GetFileSpec().GetFilename()
return [GetModuleName(i) for i in range(thread.GetNumFrames())]
def get_stack_frames(thread):
"""
Returns a sequence of stack frames for this thread.
"""
def GetStackFrame(i):
return thread.GetFrameAtIndex(i)
return [GetStackFrame(i) for i in range(thread.GetNumFrames())]
def print_stacktrace(thread, string_buffer=False):
"""Prints a simple stack trace of this thread."""
output = io.StringIO() if string_buffer else sys.stdout
target = thread.GetProcess().GetTarget()
depth = thread.GetNumFrames()
mods = get_module_names(thread)
funcs = get_function_names(thread)
symbols = get_symbol_names(thread)
files = get_filenames(thread)
lines = get_line_numbers(thread)
addrs = get_pc_addresses(thread)
if thread.GetStopReason() != lldb.eStopReasonInvalid:
desc = "stop reason=" + stop_reason_to_str(thread.GetStopReason())
else:
desc = ""
print("Stack trace for thread id={0:#x} name={1} queue={2} ".format(
thread.GetThreadID(), thread.GetName(), thread.GetQueueName()) + desc, file=output)
for i in range(depth):
frame = thread.GetFrameAtIndex(i)
function = frame.GetFunction()
load_addr = addrs[i].GetLoadAddress(target)
if not function:
file_addr = addrs[i].GetFileAddress()
start_addr = frame.GetSymbol().GetStartAddress().GetFileAddress()
symbol_offset = file_addr - start_addr
print(" frame #{num}: {addr:#016x} {mod}`{symbol} + {offset}".format(
num=i, addr=load_addr, mod=mods[i], symbol=symbols[i], offset=symbol_offset), file=output)
else:
print(" frame #{num}: {addr:#016x} {mod}`{func} at {file}:{line} {args}".format(
num=i, addr=load_addr, mod=mods[i], func='%s [inlined]' %
funcs[i] if frame.IsInlined() else funcs[i], file=files[i], line=lines[i], args=get_args_as_string(
frame, showFuncName=False) if not frame.IsInlined() else '()'), file=output)
if string_buffer:
return output.getvalue()
def print_stacktraces(process, string_buffer=False):
"""Prints the stack traces of all the threads."""
output = io.StringIO() if string_buffer else sys.stdout
print("Stack traces for " + str(process), file=output)
for thread in process:
print(print_stacktrace(thread, string_buffer=True), file=output)
if string_buffer:
return output.getvalue()
# ===================================
# Utility functions related to Frames
# ===================================
def get_parent_frame(frame):
"""
Returns the parent frame of the input frame object; None if not available.
"""
thread = frame.GetThread()
parent_found = False
for f in thread:
if parent_found:
return f
if f.GetFrameID() == frame.GetFrameID():
parent_found = True
# If we reach here, no parent has been found, return None.
return None
def get_args_as_string(frame, showFuncName=True):
"""
Returns the args of the input frame object as a string.
"""
# arguments => True
# locals => False
# statics => False
# in_scope_only => True
vars = frame.GetVariables(True, False, False, True) # type of SBValueList
args = [] # list of strings
for var in vars:
args.append("(%s)%s=%s" % (var.GetTypeName(),
var.GetName(),
var.GetValue()))
if frame.GetFunction():
name = frame.GetFunction().GetName()
elif frame.GetSymbol():
name = frame.GetSymbol().GetName()
else:
name = ""
if showFuncName:
return "%s(%s)" % (name, ", ".join(args))
else:
return "(%s)" % (", ".join(args))
def print_registers(frame, string_buffer=False):
"""Prints all the register sets of the frame."""
output = io.StringIO() if string_buffer else sys.stdout
print("Register sets for " + str(frame), file=output)
registerSet = frame.GetRegisters() # Return type of SBValueList.
print("Frame registers (size of register set = %d):" % registerSet.GetSize(
), file=output)
for value in registerSet:
#print >> output, value
print("%s (number of children = %d):" % (
value.GetName(), value.GetNumChildren()), file=output)
for child in value:
print("Name: %s, Value: %s" % (
child.GetName(), child.GetValue()), file=output)
if string_buffer:
return output.getvalue()
def get_registers(frame, kind):
"""Returns the registers given the frame and the kind of registers desired.
Returns None if there's no such kind.
"""
registerSet = frame.GetRegisters() # Return type of SBValueList.
for value in registerSet:
if kind.lower() in value.GetName().lower():
return value
return None
def get_GPRs(frame):
"""Returns the general purpose registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_GPRs
regs = get_GPRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "general purpose")
def get_FPRs(frame):
"""Returns the floating point registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_FPRs
regs = get_FPRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "floating point")
def get_ESRs(frame):
"""Returns the exception state registers of the frame as an SBValue.
The returned SBValue object is iterable. An example:
...
from lldbutil import get_ESRs
regs = get_ESRs(frame)
for reg in regs:
print "%s => %s" % (reg.GetName(), reg.GetValue())
...
"""
return get_registers(frame, "exception state")
# ======================================
# Utility classes/functions for SBValues
# ======================================
class BasicFormatter(object):
"""The basic formatter inspects the value object and prints the value."""
def format(self, value, buffer=None, indent=0):
if not buffer:
output = io.StringIO()
else:
output = buffer
# If there is a summary, it suffices.
val = value.GetSummary()
# Otherwise, get the value.
if val is None:
val = value.GetValue()
if val is None and value.GetNumChildren() > 0:
val = "%s (location)" % value.GetLocation()
print("{indentation}({type}) {name} = {value}".format(
indentation=' ' * indent,
type=value.GetTypeName(),
name=value.GetName(),
value=val), file=output)
return output.getvalue()
class ChildVisitingFormatter(BasicFormatter):
"""The child visiting formatter prints the value and its immediate children.
The constructor takes a keyword arg: indent_child, which defaults to 2.
"""
def __init__(self, indent_child=2):
"""Default indentation of 2 SPC's for the children."""
self.cindent = indent_child
def format(self, value, buffer=None):
if not buffer:
output = io.StringIO()
else:
output = buffer
BasicFormatter.format(self, value, buffer=output)
for child in value:
BasicFormatter.format(
self, child, buffer=output, indent=self.cindent)
return output.getvalue()
class RecursiveDecentFormatter(BasicFormatter):
"""The recursive decent formatter prints the value and the decendents.
The constructor takes two keyword args: indent_level, which defaults to 0,
and indent_child, which defaults to 2. The current indentation level is
determined by indent_level, while the immediate children has an additional
indentation by inden_child.
"""
def __init__(self, indent_level=0, indent_child=2):
self.lindent = indent_level
self.cindent = indent_child
def format(self, value, buffer=None):
if not buffer:
output = io.StringIO()
else:
output = buffer
BasicFormatter.format(self, value, buffer=output, indent=self.lindent)
new_indent = self.lindent + self.cindent
for child in value:
if child.GetSummary() is not None:
BasicFormatter.format(
self, child, buffer=output, indent=new_indent)
else:
if child.GetNumChildren() > 0:
rdf = RecursiveDecentFormatter(indent_level=new_indent)
rdf.format(child, buffer=output)
else:
BasicFormatter.format(
self, child, buffer=output, indent=new_indent)
return output.getvalue()
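# --- Illustrative sketch (not part of the original lldbutil helpers) ---------
# BasicFormatter only relies on the GetSummary/GetValue/GetNumChildren/
# GetTypeName/GetName interface of SBValue, so a small duck-typed stand-in
# (hypothetical, for demonstration only) is enough to exercise it outside of
# a debug session.
class _FakeValue(object):
    def __init__(self, name, type_name, value):
        self._name, self._type, self._value = name, type_name, value
    def GetSummary(self):
        return None
    def GetValue(self):
        return self._value
    def GetNumChildren(self):
        return 0
    def GetTypeName(self):
        return self._type
    def GetName(self):
        return self._name
# print(BasicFormatter().format(_FakeValue("argc", "int", "1")))
# -> (int) argc = 1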
|
|
# -*- coding: utf-8 -*-
r"""
The :mod:`pygsp.reduction` module implements functionalities for the reduction
of graphs' vertex set while keeping the graph structure.
.. autosummary::
tree_multiresolution
graph_multiresolution
kron_reduction
pyramid_analysis
pyramid_synthesis
interpolate
graph_sparsify
"""
import numpy as np
from scipy import sparse, stats
from scipy.sparse import linalg
from pygsp import graphs, filters, utils
logger = utils.build_logger(__name__)
def _analysis(g, s, **kwargs):
# TODO: that is the legacy analysis method.
s = g.filter(s, **kwargs)
while s.ndim < 3:
s = np.expand_dims(s, 1)
return s.swapaxes(1, 2).reshape(-1, s.shape[1], order='F')
def graph_sparsify(M, epsilon, maxiter=10, seed=None):
r"""Sparsify a graph (with Spielman-Srivastava).
Parameters
----------
M : Graph or sparse matrix
Graph structure or a Laplacian matrix
epsilon : float
Sparsification parameter, which must be between ``1/sqrt(N)`` and 1.
maxiter : int, optional
Maximum number of iterations.
seed : {None, int, RandomState, Generator}, optional
Seed for the random number generator (for reproducible sparsification).
Returns
-------
Mnew : Graph or sparse matrix
New graph structure or sparse matrix
Examples
--------
>>> from pygsp import reduction
>>> from matplotlib import pyplot as plt
>>> G = graphs.Sensor(100, k=20, distributed=True, seed=1)
>>> Gs = reduction.graph_sparsify(G, epsilon=0.4, seed=1)
>>> fig, axes = plt.subplots(1, 2)
>>> _ = G.plot(ax=axes[0], title='original')
>>> Gs.coords = G.coords
>>> _ = Gs.plot(ax=axes[1], title='sparsified')
References
----------
    See :cite:`spielman2011graph`, :cite:`rudelson1999random` and
    :cite:`rudelson2007sampling` for more information.
"""
# Test the input parameters
if isinstance(M, graphs.Graph):
if not M.lap_type == 'combinatorial':
raise NotImplementedError
L = M.L
else:
L = M
N = np.shape(L)[0]
if not 1./np.sqrt(N) <= epsilon < 1:
raise ValueError('GRAPH_SPARSIFY: Epsilon out of required range')
# Not sparse
resistance_distances = utils.resistance_distance(L).toarray()
# Get the Weight matrix
if isinstance(M, graphs.Graph):
W = M.W
else:
W = np.diag(L.diagonal()) - L.toarray()
W[W < 1e-10] = 0
W = sparse.coo_matrix(W)
W.data[W.data < 1e-10] = 0
W = W.tocsc()
W.eliminate_zeros()
start_nodes, end_nodes, weights = sparse.find(sparse.tril(W))
# Calculate the new weights.
weights = np.maximum(0, weights)
Re = np.maximum(0, resistance_distances[start_nodes, end_nodes])
Pe = weights * Re
Pe = Pe / np.sum(Pe)
dist = stats.rv_discrete(values=(np.arange(len(Pe)), Pe), seed=seed)
for i in range(maxiter):
# Rudelson, 1996 Random Vectors in the Isotropic Position
# (too hard to figure out actual C0)
C0 = 1 / 30.
# Rudelson and Vershynin, 2007, Thm. 3.1
C = 4 * C0
q = round(N * np.log(N) * 9 * C**2 / (epsilon**2))
results = dist.rvs(size=int(q))
        # scipy.stats.itemfreq was removed in recent SciPy; np.unique gives the same counts.
        values, counts = np.unique(results, return_counts=True)
        spin_counts = np.stack((values, counts), axis=1).astype(int)
per_spin_weights = weights / (q * Pe)
counts = np.zeros(np.shape(weights)[0])
counts[spin_counts[:, 0]] = spin_counts[:, 1]
new_weights = counts * per_spin_weights
sparserW = sparse.csc_matrix((new_weights, (start_nodes, end_nodes)),
shape=(N, N))
sparserW = sparserW + sparserW.T
sparserL = sparse.diags(sparserW.diagonal(), 0) - sparserW
if graphs.Graph(sparserW).is_connected():
break
elif i == maxiter - 1:
logger.warning('Despite attempts to reduce epsilon, sparsified graph is disconnected')
else:
epsilon -= (epsilon - 1/np.sqrt(N)) / 2.
if isinstance(M, graphs.Graph):
sparserW = sparse.diags(sparserL.diagonal(), 0) - sparserL
if not M.is_directed():
sparserW = (sparserW + sparserW.T) / 2.
Mnew = graphs.Graph(sparserW)
#M.copy_graph_attributes(Mnew)
else:
Mnew = sparse.lil_matrix(sparserL)
return Mnew
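# --- Illustrative sketch (not part of pygsp) ----------------------------------
# Minimal numpy-only illustration of the sampling distribution used above:
# each edge e = (u, v) is drawn with probability proportional to w_e * R_e,
# where R_e is the effective resistance, obtained here from the pseudo-inverse
# of the combinatorial Laplacian of a small toy graph.
def _example_edge_sampling_probabilities():
    W = np.array([[0., 1., 1., 0.],
                  [1., 0., 1., 0.],
                  [1., 1., 0., 1.],
                  [0., 0., 1., 0.]])
    L = np.diag(W.sum(axis=1)) - W
    L_pinv = np.linalg.pinv(L)
    us, vs = np.nonzero(np.tril(W))
    resistances = L_pinv[us, us] + L_pinv[vs, vs] - 2 * L_pinv[us, vs]
    Pe = W[us, vs] * resistances
    return Pe / Pe.sum()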
def interpolate(G, f_subsampled, keep_inds, order=100, reg_eps=0.005, **kwargs):
r"""Interpolate a graph signal.
Parameters
----------
G : Graph
f_subsampled : ndarray
A graph signal on the graph G.
keep_inds : ndarray
List of indices on which the signal is sampled.
order : int
Degree of the Chebyshev approximation (default = 100).
reg_eps : float
The regularized graph Laplacian is $\bar{L}=L+\epsilon I$.
A smaller epsilon may lead to better regularization,
but will also require a higher order Chebyshev approximation.
Returns
-------
f_interpolated : ndarray
Interpolated graph signal on the full vertex set of G.
References
----------
See :cite:`pesenson2009variational`
"""
L_reg = G.L + reg_eps * sparse.eye(G.N)
K_reg = getattr(G.mr, 'K_reg', kron_reduction(L_reg, keep_inds))
green_kernel = getattr(G.mr, 'green_kernel',
filters.Filter(G, lambda x: 1. / (reg_eps + x)))
alpha = K_reg.dot(f_subsampled)
try:
Nv = np.shape(f_subsampled)[1]
f_interpolated = np.zeros((G.N, Nv))
except IndexError:
f_interpolated = np.zeros((G.N))
f_interpolated[keep_inds] = alpha
return _analysis(green_kernel, f_interpolated, order=order, **kwargs)
def graph_multiresolution(G, levels, sparsify=True, sparsify_eps=None,
downsampling_method='largest_eigenvector',
reduction_method='kron', compute_full_eigen=False,
reg_eps=0.005):
r"""Compute a pyramid of graphs (by Kron reduction).
'graph_multiresolution(G,levels)' computes a multiresolution of
graph by repeatedly downsampling and performing graph reduction. The
default downsampling method is the largest eigenvector method based on
the polarity of the components of the eigenvector associated with the
largest graph Laplacian eigenvalue. The default graph reduction method
is Kron reduction followed by a graph sparsification step.
*param* is a structure of optional parameters.
Parameters
----------
G : Graph structure
The graph to reduce.
levels : int
Number of level of decomposition
lambd : float
Stability parameter. It adds self loop to the graph to give the
algorithm some stability (default = 0.025). [UNUSED?!]
sparsify : bool
To perform a spectral sparsification step immediately after
the graph reduction (default is True).
sparsify_eps : float
Parameter epsilon used in the spectral sparsification
(default is min(10/sqrt(G.N),.3)).
downsampling_method: string
The graph downsampling method (default is 'largest_eigenvector').
reduction_method : string
The graph reduction method (default is 'kron')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues and eigenvectors
for every graph in the multiresolution sequence (default is False).
reg_eps : float
The regularized graph Laplacian is :math:`\bar{L}=L+\epsilon I`.
A smaller epsilon may lead to better regularization, but will also
require a higher order Chebyshev approximation. (default is 0.005)
Returns
-------
Gs : list
A list of graph layers.
Examples
--------
>>> from pygsp import reduction
>>> levels = 5
>>> G = graphs.Sensor(N=512)
>>> G.compute_fourier_basis()
>>> Gs = reduction.graph_multiresolution(G, levels, sparsify=False)
>>> for idx in range(levels):
... fig, ax = Gs[idx].plot(title='Reduction level: {}'.format(idx))
"""
if sparsify_eps is None:
sparsify_eps = min(10. / np.sqrt(G.N), 0.3)
if compute_full_eigen:
G.compute_fourier_basis()
else:
G.estimate_lmax()
Gs = [G]
Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}
for i in range(levels):
if downsampling_method == 'largest_eigenvector':
if Gs[i]._U is not None:
V = Gs[i].U[:, -1]
else:
V = linalg.eigs(Gs[i].L, 1)[1][:, 0]
V *= np.sign(V[0])
ind = np.nonzero(V >= 0)[0]
else:
raise NotImplementedError('Unknown graph downsampling method.')
if reduction_method == 'kron':
Gs.append(kron_reduction(Gs[i], ind))
else:
raise NotImplementedError('Unknown graph reduction method.')
if sparsify and Gs[i+1].N > 2:
Gs[i+1] = graph_sparsify(Gs[i+1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i+1].N)), 1.))
# TODO : Make in place modifications instead!
if compute_full_eigen:
Gs[i+1].compute_fourier_basis()
else:
Gs[i+1].estimate_lmax()
Gs[i+1].mr = {'idx': ind, 'orig_idx': Gs[i].mr['orig_idx'][ind], 'level': i}
L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
Gs[i].mr['K_reg'] = kron_reduction(L_reg, ind)
Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1./(reg_eps + x))
return Gs
def kron_reduction(G, ind):
r"""Compute the Kron reduction.
This function perform the Kron reduction of the weight matrix in the
graph *G*, with boundary nodes labeled by *ind*. This function will
create a new graph with a weight matrix Wnew that contain only boundary
nodes and is computed as the Schur complement of the original matrix
with respect to the selected indices.
Parameters
----------
G : Graph or sparse matrix
Graph structure or weight matrix
ind : list
indices of the nodes to keep
Returns
-------
Gnew : Graph or sparse matrix
New graph structure or weight matrix
References
----------
See :cite:`dorfler2013kron`
"""
if isinstance(G, graphs.Graph):
if G.lap_type != 'combinatorial':
msg = 'Unknown reduction for {} Laplacian.'.format(G.lap_type)
raise NotImplementedError(msg)
if G.is_directed():
msg = 'This method only work for undirected graphs.'
raise NotImplementedError(msg)
L = G.L
else:
L = G
N = np.shape(L)[0]
ind_comp = np.setdiff1d(np.arange(N, dtype=int), ind)
L_red = L[np.ix_(ind, ind)]
L_in_out = L[np.ix_(ind, ind_comp)]
L_out_in = L[np.ix_(ind_comp, ind)].tocsc()
L_comp = L[np.ix_(ind_comp, ind_comp)].tocsc()
Lnew = L_red - L_in_out.dot(linalg.spsolve(L_comp, L_out_in))
# Make the laplacian symmetric if it is almost symmetric!
if np.abs(Lnew - Lnew.T).sum() < np.spacing(1) * np.abs(Lnew).sum():
Lnew = (Lnew + Lnew.T) / 2.
if isinstance(G, graphs.Graph):
# Suppress the diagonal ? This is a good question?
Wnew = sparse.diags(Lnew.diagonal(), 0) - Lnew
Snew = Lnew.diagonal() - np.ravel(Wnew.sum(0))
if np.linalg.norm(Snew, 2) >= np.spacing(1000):
Wnew = Wnew + sparse.diags(Snew, 0)
# Removing diagonal for stability
Wnew = Wnew - Wnew.diagonal()
coords = G.coords[ind, :] if len(G.coords.shape) else np.ndarray(None)
Gnew = graphs.Graph(Wnew, coords=coords, lap_type=G.lap_type,
plotting=G.plotting)
else:
Gnew = Lnew
return Gnew
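# --- Illustrative sketch (not part of pygsp) ----------------------------------
# Dense numpy version of the Schur complement computed above, on the
# combinatorial Laplacian of a 3-node path graph, keeping the two endpoints:
# the interior node is eliminated and replaced by a single edge of weight 1/2.
def _example_kron_reduction_dense():
    L = np.array([[1., -1., 0.],
                  [-1., 2., -1.],
                  [0., -1., 1.]])
    keep = np.array([0, 2])
    comp = np.array([1])
    L_red = L[np.ix_(keep, keep)]
    L_in_out = L[np.ix_(keep, comp)]
    L_out_in = L[np.ix_(comp, keep)]
    L_comp = L[np.ix_(comp, comp)]
    return L_red - L_in_out.dot(np.linalg.solve(L_comp, L_out_in))
    # -> [[ 0.5, -0.5],
    #     [-0.5,  0.5]]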
def pyramid_analysis(Gs, f, **kwargs):
r"""Compute the graph pyramid transform coefficients.
Parameters
----------
Gs : list of graphs
A multiresolution sequence of graph structures.
f : ndarray
Graph signal to analyze.
h_filters : list
A list of filter that will be used for the analysis and sythesis operator.
If only one filter is given, it will be used for all levels.
Default is h(x) = 1 / (2x+1)
Returns
-------
ca : ndarray
Coarse approximation at each level
pe : ndarray
Prediction error at each level
h_filters : list
Graph spectral filters applied
References
----------
See :cite:`shuman2013framework` and :cite:`pesenson2009variational`.
"""
if np.shape(f)[0] != Gs[0].N:
raise ValueError("PYRAMID ANALYSIS: The signal to analyze should have the same dimension as the first graph.")
levels = len(Gs) - 1
# check if the type of filters is right.
h_filters = kwargs.pop('h_filters', lambda x: 1. / (2*x+1))
if not isinstance(h_filters, list):
if hasattr(h_filters, '__call__'):
logger.warning('Converting filters into a list.')
h_filters = [h_filters]
else:
logger.error('Filters must be a list of functions.')
if len(h_filters) == 1:
h_filters = h_filters * levels
elif len(h_filters) != levels:
message = 'The number of filters must be one or equal to {}.'.format(levels)
raise ValueError(message)
ca = [f]
pe = []
for i in range(levels):
# Low pass the signal
s_low = _analysis(filters.Filter(Gs[i], h_filters[i]), ca[i], **kwargs)
# Keep only the coefficient on the selected nodes
ca.append(s_low[Gs[i+1].mr['idx']])
# Compute prediction
s_pred = interpolate(Gs[i], ca[i+1], Gs[i+1].mr['idx'], **kwargs)
# Compute errors
pe.append(ca[i] - s_pred)
return ca, pe
def pyramid_synthesis(Gs, cap, pe, order=30, **kwargs):
r"""Synthesize a signal from its pyramid coefficients.
Parameters
----------
Gs : Array of Graphs
A multiresolution sequence of graph structures.
cap : ndarray
Coarsest approximation of the original signal.
pe : ndarray
Prediction error at each level.
use_exact : bool
To use exact graph spectral filtering instead of the Chebyshev approximation.
order : int
Degree of the Chebyshev approximation (default=30).
least_squares : bool
To use the least squares synthesis (default=False).
h_filters : ndarray
The filters used in the analysis operator.
These are required for least squares synthesis, but not for the direct synthesis method.
use_landweber : bool
To use the Landweber iteration approximation in the least squares synthesis.
reg_eps : float
Interpolation parameter.
landweber_its : int
Number of iterations in the Landweber approximation for least squares synthesis.
landweber_tau : float
Parameter for the Landweber iteration.
Returns
-------
reconstruction : ndarray
The reconstructed signal.
ca : ndarray
Coarse approximations at each level
"""
least_squares = bool(kwargs.pop('least_squares', False))
def_ul = Gs[0].N > 3000 or Gs[0]._e is None or Gs[0]._U is None
use_landweber = bool(kwargs.pop('use_landweber', def_ul))
    reg_eps = float(kwargs.pop('reg_eps', 0.005))
    h_filters = kwargs.pop('h_filters', None)
    if least_squares and h_filters is None:
        raise ValueError('h-filters not provided.')
    levels = len(Gs) - 1
    if len(pe) != levels:
        raise ValueError('Gs and pe have different shapes.')
ca = [cap]
# Reconstruct each level
for i in range(levels):
if not least_squares:
s_pred = interpolate(Gs[levels - i - 1], ca[i], Gs[levels - i].mr['idx'],
order=order, reg_eps=reg_eps, **kwargs)
ca.append(s_pred + pe[levels - i - 1])
        else:
            ca.append(_pyramid_single_interpolation(
                Gs[levels - i - 1], ca[i], pe[levels - i - 1],
                Gs[levels - i].mr['idx'], h_filters[levels - i - 1],
                use_landweber=use_landweber, reg_eps=reg_eps, **kwargs))
ca.reverse()
reconstruction = ca[0]
return reconstruction, ca
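# --- Illustrative usage sketch (not part of pygsp; assumes graphs.Sensor is
# available and uses only the module-level functions defined above) -----------
# Round-trips a signal through the pyramid: analysis followed by the default
# (non least-squares) synthesis should approximately recover the input.
def _example_pyramid_roundtrip(levels=3, seed=42):
    G = graphs.Sensor(256, seed=seed)
    G.compute_fourier_basis()
    Gs = graph_multiresolution(G, levels, sparsify=False)
    f = np.random.RandomState(seed).randn(G.N, 1)
    ca, pe = pyramid_analysis(Gs, f)
    f_rec, _ = pyramid_synthesis(Gs, ca[-1], pe)
    return np.linalg.norm(f - f_rec) / np.linalg.norm(f)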
def _pyramid_single_interpolation(G, ca, pe, keep_inds, h_filter, **kwargs):
r"""Synthesize a single level of the graph pyramid transform.
Parameters
----------
G : Graph
Graph structure on which the signal resides.
ca : ndarray
Coarse approximation of the signal on a reduced graph.
pe : ndarray
Prediction error that was made when forming the current coarse approximation.
keep_inds : ndarray
The indices of the vertices to keep when downsampling the graph and signal.
h_filter : lambda expression
The filter in use at this level.
use_landweber : bool
To use the Landweber iteration approximation in the least squares synthesis.
Default is False.
reg_eps : float
Interpolation parameter. Default is 0.005.
landweber_its : int
Number of iterations in the Landweber approximation for least squares synthesis.
Default is 50.
landweber_tau : float
Parameter for the Landweber iteration. Default is 1.
Returns
-------
finer_approx :
Coarse approximation of the signal on a higher resolution graph.
"""
    nb_ind = keep_inds.shape[0]
N = G.N
reg_eps = float(kwargs.pop('reg_eps', 0.005))
use_landweber = bool(kwargs.pop('use_landweber', False))
landweber_its = int(kwargs.pop('landweber_its', 50))
landweber_tau = float(kwargs.pop('landweber_tau', 1.))
# index matrix (nb_ind x N) of keep_inds, S_i,j = 1 iff keep_inds[i] = j
S = sparse.csr_matrix(([1] * nb_ind, (range(nb_ind), keep_inds)), shape=(nb_ind, N))
if use_landweber:
x = np.zeros(N)
z = np.concatenate((ca, pe), axis=0)
green_kernel = filters.Filter(G, lambda x: 1./(x+reg_eps))
PhiVlt = _analysis(green_kernel, S.T, **kwargs).T
filt = filters.Filter(G, h_filter, **kwargs)
for iteration in range(landweber_its):
h_filtered_sig = _analysis(filt, x, **kwargs)
x_bar = h_filtered_sig[keep_inds]
y_bar = x - interpolate(G, x_bar, keep_inds, **kwargs)
z_delt = np.concatenate((x_bar, y_bar), axis=0)
z_delt = z - z_delt
alpha_new = PhiVlt * z_delt[nb_ind:]
            x_up = sparse.csr_matrix((z_delt[:nb_ind], (range(nb_ind), [0] * nb_ind)),
                                     shape=(N, 1))
            reg_L = G.L + reg_eps * sparse.eye(N)
elim_inds = np.setdiff1d(np.arange(N, dtype=int), keep_inds)
L_red = reg_L[np.ix_(keep_inds, keep_inds)]
L_in_out = reg_L[np.ix_(keep_inds, elim_inds)]
L_out_in = reg_L[np.ix_(elim_inds, keep_inds)]
L_comp = reg_L[np.ix_(elim_inds, elim_inds)]
next_term = L_red * alpha_new - L_in_out * linalg.spsolve(L_comp, L_out_in * alpha_new)
            next_up = sparse.csr_matrix((next_term, (keep_inds, [0] * nb_ind)), shape=(N, 1))
x += landweber_tau * _analysis(filt, x_up - next_up, **kwargs) + z_delt[nb_ind:]
finer_approx = x
else:
# When the graph is small enough, we can do a full eigendecomposition
# and compute the full analysis operator T_a
H = G.U * sparse.diags(h_filter(G.e), 0) * G.U.T
Phi = G.U * sparse.diags(1./(reg_eps + G.e), 0) * G.U.T
Ta = np.concatenate((S * H, sparse.eye(G.N) - Phi[:, keep_inds] * linalg.spsolve(Phi[np.ix_(keep_inds, keep_inds)], S*H)), axis=0)
        finer_approx = linalg.spsolve(Ta.T * Ta, Ta.T * np.concatenate((ca, pe), axis=0))
    return finer_approx
def _tree_depths(A, root):
if not graphs.Graph(A=A).is_connected():
raise ValueError('Graph is not connected')
N = np.shape(A)[0]
    assigned = np.array([root])
depths = np.zeros((N))
parents = np.zeros((N))
next_to_expand = np.array([root])
current_depth = 1
while len(assigned) < N:
new_entries_whole_round = []
for i in range(len(next_to_expand)):
neighbors = np.where(A[next_to_expand[i]])[0]
new_entries = np.setdiff1d(neighbors, assigned)
parents[new_entries] = next_to_expand[i]
depths[new_entries] = current_depth
assigned = np.concatenate((assigned, new_entries))
new_entries_whole_round = np.concatenate((new_entries_whole_round,
new_entries))
current_depth = current_depth + 1
next_to_expand = new_entries_whole_round
return depths, parents
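# --- Illustrative sketch (not part of pygsp) ----------------------------------
# Plain-numpy BFS on a toy 4-node path graph, mirroring what _tree_depths
# computes: the depth of each vertex from the root and its parent.
def _example_bfs_depths():
    A = np.array([[0, 1, 0, 0],
                  [1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [0, 0, 1, 0]])
    root = 0
    depths = np.full(4, -1)
    parents = np.full(4, -1)
    depths[root] = 0
    frontier = [root]
    while frontier:
        nxt = []
        for u in frontier:
            for v in np.nonzero(A[u])[0]:
                if depths[v] == -1:
                    depths[v] = depths[u] + 1
                    parents[v] = u
                    nxt.append(v)
        frontier = nxt
    return depths, parents  # -> (array([0, 1, 2, 3]), array([-1, 0, 1, 2]))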
def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance',
compute_full_eigen=False, root=None):
r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
    Nlevel : int
        Number of times to downsample and coarsen the tree.
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
        Ndarray, with each element containing a graph structure representing a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree.
"""
if not root:
if hasattr(G, 'root'):
root = G.root
else:
root = 1
Gs = [G]
if compute_full_eigen:
Gs[0].compute_fourier_basis()
subsampled_vertex_indices = []
depths, parents = _tree_depths(G.A, root)
old_W = G.W
for lev in range(Nlevel):
# Identify the vertices in the even depths of the current tree
        down_odd = np.round(depths) % 2
down_even = np.ones((Gs[lev].N)) - down_odd
keep_inds = np.where(down_even == 1)[0]
subsampled_vertex_indices.append(keep_inds)
# There will be one undirected edge in the new graph connecting each
# non-root subsampled vertex to its new parent. Here, we find the new
# indices of the new parents
        # np.setdiff1d only returns the values; the positions within keep_inds
        # (MATLAB setdiff's second output) are recovered separately.
        non_root_keep_inds = np.setdiff1d(keep_inds, [root])
        new_non_root_inds = np.searchsorted(keep_inds, non_root_keep_inds)
old_parents_of_non_root_keep_inds = parents[non_root_keep_inds]
old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds]
# TODO new_non_root_parents = dsearchn(keep_inds, old_grandparents_of_non_root_keep_inds)
old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W)
i_inds = np.concatenate((new_non_root_inds, new_non_root_parents))
j_inds = np.concatenate((new_non_root_parents, new_non_root_inds))
new_N = np.sum(down_even)
if reduction_method == "unweighted":
new_weights = np.ones(np.shape(i_inds))
elif reduction_method == "sum":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds,old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]);
old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = old_weights_to_parents + old_weights_parents_to_grandparents
            new_weights = np.concatenate((new_weights, new_weights))
elif reduction_method == "resistance_distance":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds])
            old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = 1./(1./old_weights_to_parents + 1./old_weights_parents_to_grandparents)
new_weights = np.concatenate(([new_weights, new_weights]))
else:
raise ValueError('Unknown graph reduction method.')
new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)),
shape=(new_N, new_N))
# Update parents
new_root = np.where(keep_inds == root)[0]
parents = np.zeros(np.shape(keep_inds)[0], np.shape(keep_inds)[0])
parents[:new_root - 1, new_root:] = new_non_root_parents
# Update depths
depths = depths[keep_inds]
depths = depths/2.
# Store new tree
Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds], limits=G.limits, root=new_root)
#Gs[lev].copy_graph_attributes(Gtemp, False)
if compute_full_eigen:
Gs[lev + 1].compute_fourier_basis()
# Replace current adjacency matrix and root
Gs.append(Gtemp)
old_W = new_W
root = new_root
return Gs, subsampled_vertex_indices
|
|
def renderStreet(map, street_id=0):
graph = []
street = map.streets[street_id]
# graph.append([-2 for _ in range(street.height)])
for lane in range(street.width):
row = []
for cell in range(street.height):
val = street.get((lane, cell))
row.append(-1 if val == 0 else val.speed)
graph.append(row)
# graph.append([-2 for _ in range(street.height)])
return graph
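# --- Illustrative sketch (hypothetical stand-in objects, not part of the
# simulator) -------------------------------------------------------------------
# renderStreet only needs `street.width`, `street.height` and
# `street.get((lane, cell))`, so small duck-typed stubs are enough to see the
# produced grid: empty cells become -1, occupied cells show the car's speed.
class _StubCar:
    def __init__(self, speed):
        self.speed = speed
class _StubStreet:
    width, height = 2, 3
    _cars = {(0, 1): _StubCar(2)}
    def get(self, pos):
        return self._cars.get(pos, 0)
class _StubMap:
    streets = {0: _StubStreet()}
# renderStreet(_StubMap()) -> [[-1, 2, -1], [-1, -1, -1]]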
def renderIntersection(map, hstreet_id=0, vstreet_id=1):
graph = []
horizontal = map.streets[hstreet_id]
vertical = map.streets[vstreet_id]
hfront = map.streets[horizontal.front_id]
vfront = map.streets[vertical.front_id]
width = horizontal.height + horizontal.front_offset + hfront.height
height = vertical.height + vertical.front_offset + vfront.height
graph = [[-2] * width for _ in range(height)]
for lane in range(horizontal.width):
for cell in range(horizontal.height + horizontal.front_offset):
val = horizontal.get((lane, cell))
y = lane + vertical.height
x = cell
graph[x][y] = -1 if val == 0 else val.id[:6]
for lane in range(hfront.width):
for cell in range(hfront.height):
val = hfront.get((lane, cell))
y = lane + vertical.height
x = cell + horizontal.height + horizontal.front_offset
graph[x][y] = -1 if val == 0 else val.id[:6]
for lane in range(vertical.width):
for cell in range(vertical.height):
val = vertical.get((lane, cell))
x = lane + horizontal.height
y = cell
if vertical.orientation == 3:
x = width - x - 1
if vertical.orientation == 1:
y = height - y - 1
graph[x][y] = -1 if val == 0 else val.id[:6]
for lane in range(vertical.width):
for cell in range(vfront.height):
val = vfront.get((lane, cell))
x = lane + horizontal.height
y = cell + vertical.height + vertical.front_offset
if vertical.orientation == 3:
x = width - x - 1
if vertical.orientation == 1:
y = height - y - 1
graph[x][y] = -1 if val == 0 else val.id[:6]
# traffic lights
graph[horizontal.height - 4][vertical.height - 2] = '00ff00' if horizontal.light.color > 0 else 'ff0000'
graph[horizontal.height - 4][vertical.height + horizontal.width + 1] = '00ff00' if horizontal.light.color > 0 else 'ff0000'
graph[horizontal.height - 4][vertical.height - 1] = '00ff00' if horizontal.light.color > 0 else 'ff0000'
graph[horizontal.height - 4][vertical.height + horizontal.width] = '00ff00' if horizontal.light.color > 0 else 'ff0000'
vlight = vertical.height - 4 if vertical.orientation == 3 else vfront.height + horizontal.width + 4
graph[horizontal.height - 2][vlight] = '00ff00' if vertical.light.color > 0 else 'ff0000'
graph[horizontal.height + vertical.width + 1][vlight] = '00ff00' if vertical.light.color > 0 else 'ff0000'
graph[horizontal.height - 1][vlight] = '00ff00' if vertical.light.color > 0 else 'ff0000'
graph[horizontal.height + vertical.width][vlight] = '00ff00' if vertical.light.color > 0 else 'ff0000'
return graph
class GridMapRenderer:
def __init__(self, map):
self.streets = map.clone().streets
self.map = map
init_street = self.streets.pop(0, None)
self.min_x, self.min_y = 0, 0
self.max_x, self.max_y = 0, 0
map_ = self.create_map(init_street)
self.map_ = self.normalize(map_)
self.width = self.max_x - self.min_x
self.height = self.max_y - self.min_y
# for s in map_:
# print(
# s['street'].id,
# s['initial'],
# s['orientation']
# )
#
# print((self.min_x, self.min_y), (self.max_x, self.max_y))
def create_map(self, street, initial=(0, 0), orientation=0):
streets_desc = []
streets_desc.append({
'street': self.map.streets[street.id],
'initial': initial,
'orientation': orientation,
})
streets_out = [
(street.front_id, 0),
(street.front['right'], 1),
(street.front['left'], 3),
(street.back_id, 4)
]
axe, sign, ox, oy = 0, 1, 0, 0
i_width, i_height = street.front_offset, street.width
if orientation in [1, 3]:
axe = 1
i_width, i_height = i_height, i_width
if orientation in [1, 2]:
ox = -i_width + 1
if orientation in [2, 3]:
sign = -1
oy = -i_height + 1
intersection_address = list(initial)
intersection_address[axe] += sign * street.height
intersection_address[0] += ox
intersection_address[1] += oy
for street_out in streets_out:
new_street = self.streets.pop(street_out[0], None)
if new_street:
new_orientation = (orientation + street_out[1]) % 4
min_x, min_y = 0, 0
max_x, max_y = 0, 0
if new_orientation == 0:
new_initial = [i_width, 0]
max_x = new_street.height + new_street.front_offset
elif new_orientation == 1:
new_initial = [i_width - 1, i_height]
max_y = new_street.height + new_street.front_offset
elif new_orientation == 2:
new_initial = [-1, i_height - 1]
min_x = -(new_street.height + new_street.front_offset)
elif new_orientation == 3:
new_initial = [0, -1]
min_y = -(new_street.height + new_street.front_offset)
if street_out[1] == 4:
val = street.height + new_street.height + new_street.front_offset
if new_orientation in [0, 1]:
val *= -1
new_initial[new_orientation % 2] += val
addr = (
intersection_address[0]+new_initial[0],
intersection_address[1]+new_initial[1]
)
self.min_x = min(self.min_x, addr[0] + min_x)
self.min_y = min(self.min_y, addr[1] + min_y)
self.max_x = max(self.max_x, addr[0] + max_x)
self.max_y = max(self.max_y, addr[1] + max_y)
streets_desc.extend(self.create_map(
new_street,
initial=addr,
orientation=new_orientation
))
return streets_desc
def normalize(self, map_):
offset_x = -self.min_x
offset_y = -self.min_y
for s in map_:
initial = list(s['initial'])
initial[0] += offset_x
initial[1] += offset_y
s['initial'] = tuple(initial)
return map_
def get_matrix(self):
matrix = [[-2] * self.height for _ in range(self.width)]
for street_ in self.map_:
street = street_['street']
axe = 0 if street_['orientation'] in [0, 2] else 1
sign = 1 if street_['orientation'] in [0, 1] else -1
initial = street_['initial']
for lane in range(street.width):
for cell in range(street.height + street.front_offset):
val = street.get((lane, cell))
addr = list(initial)
addr[axe] += cell * sign
addr[(axe + 1) % 2] += lane * sign * (1 if axe == 0 else -1)
matrix[addr[0]][addr[1]] = -1 if val == 0 else val.id[:6]
# traffic light
addr = list(initial)
addr[axe] += (street.height - 2) * sign
addr[(axe + 1) % 2] += street.width * sign * (1 if axe == 0 else -1)
matrix[addr[0]][addr[1]] = '00ff00' if street.light.color > 0 else 'ff0000'
addr[(axe + 1) % 2] += sign * (1 if axe == 0 else -1)
matrix[addr[0]][addr[1]] = '00ff00' if street.light.color > 0 else 'ff0000'
addr[axe] -= sign
matrix[addr[0]][addr[1]] = '00ff00' if street.light.color > 0 else 'ff0000'
addr[(axe + 1) % 2] -= sign * (1 if axe == 0 else -1)
matrix[addr[0]][addr[1]] = '00ff00' if street.light.color > 0 else 'ff0000'
return matrix
|
|
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to use Modules as feature columns."""
import collections
import tensorflow as tf
from tensorflow_hub import image_util
from tensorflow_hub import module
# TODO(b/73987364): It is not possible to extend feature columns without
# depending on TensorFlow internal implementation details.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
# pylint: enable=g-direct-tensorflow-import
class DenseFeatureColumn(
feature_column._DenseColumn, # pylint: disable=protected-access
feature_column_v2.DenseColumn):
@property
def dtype(self):
return tf.float32
_MODULE_RESOURCE_STRING = "module"
def text_embedding_column(key, module_spec, trainable=False):
"""Uses a Module to construct a dense representation from a text feature.
TODO(b/131678043): This does not work yet with TF2.
This feature column can be used on an input feature whose values are strings
of arbitrary size.
The result of this feature column is the result of passing its `input`
through the module `m` instantiated from `module_spec`, as per
`result = m(input)`. The `result` must have dtype float32 and shape
`[batch_size, num_features]` with a known value of num_features.
Example:
```python
comment = hub.text_embedding_column("comment", "/tmp/text-module")
feature_columns = [comment, ...]
...
features = {
"comment": np.array(["wow, much amazing", "so easy", ...]),
...
}
labels = np.array([[1], [0], ...])
# If running TF 2.x, use `tf.compat.v1.estimator.inputs.numpy_input_fn`
input_fn = tf.estimator.inputs.numpy_input_fn(features, labels,
shuffle=True)
estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns)
estimator.train(input_fn, max_steps=100)
```
Args:
key: A string or `_FeatureColumn` identifying the text feature.
module_spec: A ModuleSpec defining the Module to instantiate or a path where
to load a ModuleSpec via `load_module_spec`
trainable: Whether or not the Module is trainable. False by default, meaning
the pre-trained weights are frozen. This is different from the ordinary
tf.feature_column.embedding_column(), but that one is intended for
training from scratch.
Returns:
`_DenseColumn` that converts from text input.
Raises:
ValueError: if module_spec is not suitable for use in this feature column.
"""
return _TextEmbeddingColumn(
key=key, module_spec_path=module_spec, trainable=trainable)
def _check_module_is_text_embedding(module_spec):
"""Raises ValueError if `module_spec` is not a text-embedding module.
Args:
module_spec: A `ModuleSpec` to test.
Raises:
ValueError: if `module_spec` default signature is not compatible with
Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
"""
issues = []
# Find issues with signature inputs.
input_info_dict = module_spec.get_input_info_dict()
if len(input_info_dict) != 1:
issues.append("Module default signature must require only one input")
else:
input_info, = input_info_dict.values()
input_shape = input_info.get_shape()
if not (input_info.dtype == tf.string and input_shape.ndims == 1 and
input_shape.as_list() == [None]):
issues.append("Module default signature must have only one input "
"tf.Tensor(shape=(?,), dtype=string)")
# Find issues with signature outputs.
output_info_dict = module_spec.get_output_info_dict()
if "default" not in output_info_dict:
issues.append("Module default signature must have a 'default' output.")
else:
output_info = output_info_dict["default"]
output_shape = output_info.get_shape()
if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and
not output_shape.as_list()[0] and output_shape.as_list()[1]):
issues.append("Module default signature must have a 'default' output of "
"tf.Tensor(shape=(?,K), dtype=float32).")
if issues:
raise ValueError("Module is not a text-embedding: %r" % issues)
class _TextEmbeddingColumn(
DenseFeatureColumn,
collections.namedtuple("_ModuleEmbeddingColumn",
("key", "module_spec_path", "trainable"))):
"""Returned by text_embedding_column(). Do not use directly."""
def __init__(self, key, module_spec_path, trainable):
self.module_spec = module.as_module_spec(self.module_spec_path)
_check_module_is_text_embedding(self.module_spec)
super().__init__()
@property
def _is_v2_column(self):
return True
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
@property
def name(self):
"""Returns string. Used for variable_scope and naming."""
if not hasattr(self, "_name"):
key_name = self.key if isinstance(self.key, str) else self.key.name
self._name = "{}_hub_module_embedding".format(key_name)
return self._name
def create_state(self, state_manager):
"""Imports the module along with all variables."""
# Note: state_manager._trainable is not public but is the pattern used
# to propagate the "trainable" state that used to be received via
# self._get_dense_tensor.
trainable = self.trainable and state_manager._trainable # pylint: disable=protected-access
m = module.Module(self.module_spec, trainable=trainable)
state_manager.add_resource(self, _MODULE_RESOURCE_STRING, m)
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`)."""
return inputs.get(self.key)
def transform_feature(self, transformation_cache, state_manager):
return transformation_cache.get(self.key, state_manager)
@property
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
return self.parse_example_spec
@property
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
return {self.key: tf.compat.v1.FixedLenFeature([1], tf.string)}
@property
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.variable_shape
@property
def variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.module_spec.get_output_info_dict()["default"].get_shape()[1:]
def _get_dense_tensor_for_input_tensor(self, input_tensor, text_module):
text_batch = tf.reshape(input_tensor, shape=[-1])
return text_module(text_batch)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`."""
del weight_collections
input_tensor = inputs.get(self)
text_module = module.Module(
self.module_spec, trainable=self.trainable and trainable)
return self._get_dense_tensor_for_input_tensor(input_tensor, text_module)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
text_module = state_manager.get_resource(self, _MODULE_RESOURCE_STRING)
return self._get_dense_tensor_for_input_tensor(input_tensor, text_module)
def get_config(self):
if not isinstance(self.module_spec_path, str):
raise NotImplementedError(
"Can only generate a valid config for `hub.text_embedding_column`"
"that uses a string `module_spec`.\n\n"
"Got `type(module_spec)`: {}".format(type(self.module_spec_path)))
config = dict(zip(self._fields, self))
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
copied_config = config.copy()
return cls(**copied_config)
def image_embedding_column(key, module_spec, image_size=None):
"""Uses a Module to get a dense 1-D representation from the pixels of images.
TODO(b/131678043): This does not work yet with TF2.
This feature column can be used on images, represented as float32 tensors of
RGB pixel data in the range [0,1]. This can be read from a numeric_column()
if the tf.Example input data happens to have decoded images, all with the
same shape [height, width, 3]. More commonly, the input_fn will have code to
explicitly decode images, resize them (possibly after performing data
augmentation such as random crops etc.), and provide a batch of shape
[batch_size, height, width, 3].
The result of this feature column is the result of passing its `input`
through the module `m` instantiated from `module_spec`, as per
`result = m({"images": input})`. The `result` must have dtype float32 and
shape `[batch_size, num_features]` with a known value of num_features.
Example:
```python
image_column = hub.image_embedding_column("embeddings", "/tmp/image-module")
feature_columns = [image_column, ...]
estimator = tf.estimator.LinearClassifier(feature_columns, ...)
height, width = hub.get_expected_image_size(image_column.module_spec)
input_fn = ... # Provides "embeddings" with shape [None, height, width, 3].
estimator.train(input_fn, ...)
```
Args:
key: A string or `_FeatureColumn` identifying the input image data.
module_spec: A string handle or a `ModuleSpec` identifying the module.
image_size: Optional. If specified it should be a tuple of image height and
width to use with the module. Note that it depends on the module on
whether the default size can be overridden and what the permissible
values are.
Returns:
`_DenseColumn` that converts from pixel data.
Raises:
ValueError: if module_spec is not suitable for use in this feature column.
"""
# Configuration stored in a feature column should be hashable or user can
# get a TypeError when using it with DenseFeatures. If a user passes a list
# cast it to a tuple to avoid wasted debugging time.
if isinstance(image_size, list):
image_size = tuple(image_size)
return _ImageEmbeddingColumn(key=key, module_spec_path=module_spec,
image_size=image_size)
def _check_module_is_image_embedding(module_spec, check_image_size):
"""Raises ValueError if `module_spec` is not usable as image embedding.
Args:
module_spec: A `_ModuleSpec` to test.
check_image_size: Whether to check for compatibility with
get_expected_image_size.
Raises:
ValueError: if `module_spec` default signature is not compatible with
mappingan "images" input to a Tensor(float32, shape=(_,K)).
"""
issues = []
# Find issues with "default" signature inputs. The common signatures for
# image models prescribe a specific name; we trust it if we find it
# and if we can do the necessary inference of input shapes from it.
input_info_dict = module_spec.get_input_info_dict()
if (list(input_info_dict.keys()) != ["images"] or
input_info_dict["images"].dtype != tf.float32):
issues.append("Module 'default' signature must require a single input, "
"which must have type float32 and name 'images'.")
else:
try:
if check_image_size:
image_util.get_expected_image_size(module_spec)
except ValueError as e:
issues.append("Module does not support hub.get_expected_image_size(); "
"original error was:\n" + str(e)) # Raised again below.
# Find issues with "default" signature outputs. We test that the dtype and
# shape is appropriate for use in input_layer().
output_info_dict = module_spec.get_output_info_dict()
if "default" not in output_info_dict:
issues.append("Module 'default' signature must have a 'default' output.")
else:
output_type = output_info_dict["default"].dtype
output_shape = output_info_dict["default"].get_shape()
if not (output_type == tf.float32 and output_shape.ndims == 2 and
output_shape.dims[1].value):
issues.append("Module 'default' signature must have a 'default' output "
"of tf.Tensor(shape=(_,K), dtype=float32).")
if issues:
raise ValueError("Module is not usable as image embedding: %r" % issues)
class _ImageEmbeddingColumn(DenseFeatureColumn,
collections.namedtuple("_ImageEmbeddingColumn",
("key", "module_spec_path",
"image_size"))
):
"""Returned by image_embedding_column(). Do not use directly."""
def __init__(self, key, module_spec_path, image_size):
self.module_spec = module.as_module_spec(self.module_spec_path)
_check_module_is_image_embedding(self.module_spec,
check_image_size=self.image_size is None)
super().__init__()
@property
def _is_v2_column(self):
return True
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
@property
def name(self):
"""Returns string. Used for variable_scope and naming."""
if not hasattr(self, "_name"):
key_name = self.key if isinstance(self.key, str) else self.key.name
self._name = "{}_hub_module_embedding".format(key_name)
return self._name
def create_state(self, state_manager):
"""Imports the module along with all variables."""
# Module is not trainable by default.
m = module.Module(self.module_spec)
state_manager.add_resource(self, _MODULE_RESOURCE_STRING, m)
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`)."""
return inputs.get(self.key)
def transform_feature(self, transformation_cache, state_manager):
return transformation_cache.get(self.key, state_manager)
@property
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
return self.parse_example_spec
@property
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
if self.image_size:
height, width = self.image_size
else:
height, width = image_util.get_expected_image_size(self.module_spec)
input_shape = [height, width, 3]
return {self.key: tf.compat.v1.FixedLenFeature(input_shape, tf.float32)}
@property
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.variable_shape
@property
def variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.module_spec.get_output_info_dict()["default"].get_shape()[1:]
def _get_dense_tensor_for_images(self, images, image_module):
return image_module({"images": images})
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections, trainable # Unused.
images = inputs.get(self)
image_module = module.Module(self.module_spec)
return self._get_dense_tensor_for_images(images, image_module)
def get_dense_tensor(self, transformation_cache, state_manager):
images = transformation_cache.get(self, state_manager)
image_module = state_manager.get_resource(self, _MODULE_RESOURCE_STRING)
return self._get_dense_tensor_for_images(images, image_module)
def get_config(self):
if not isinstance(self.module_spec_path, str):
raise NotImplementedError(
"Can only generate a valid config for `hub.image_embedding_column`"
"that uses a string `module_spec`.\n\n"
"Got `type(module_spec)`: {}".format(type(self.module_spec_path)))
config = dict(zip(self._fields, self))
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
copied_config = config.copy()
return cls(**copied_config)
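# Minimal usage sketch for the column above (illustrative only; not executed at
# import time). It assumes the public image_embedding_column() wrapper defined
# earlier in this file and uses a placeholder TF-Hub handle rather than a real
# image feature-vector module.
def _example_image_embedding_column():
  image_col = image_embedding_column(
      "images", "https://tfhub.dev/example/feature-vector/1")
  # The column plugs into canned estimators like any other dense column.
  return tf.estimator.DNNClassifier(
      hidden_units=[64], feature_columns=[image_col])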
def sparse_text_embedding_column(key,
module_spec,
combiner,
default_value,
trainable=False):
"""Uses a Module to construct dense representations from sparse text features.
TODO(b/131678043): This does not work yet with TF2.
The input to this feature column is a batch of multiple strings with
arbitrary size, assuming the input is a SparseTensor.
This type of feature column is typically suited for modules that operate on
pre-tokenized text to produce token level embeddings which are combined with
the combiner into a text embedding. The combiner always treats the tokens as a
bag of words rather than a sequence.
The output (i.e., transformed input layer) is a DenseTensor, with shape
[batch_size, num_embedding_dim].
For Example:
```python
comment = hub.sparse_text_embedding_column("comment", "/tmp/text_module")
feature_columns = [comment, ...]
...
features = {
"comment": tf.SparseTensor(indices=[[0, 0], [1, 2]],
values=['sparse', 'embedding'],
dense_shape=[3, 4]),
...
}
estimator = tf.estimator.DNNClassifier(hidden_units, feature_columns)
```
Args:
key: A string or `_FeatureColumn` identifying the text feature.
module_spec: A string handle or a `_ModuleSpec` identifying the module.
combiner: a string specifying reducing op for embeddings in the same
Example. Currently, 'mean', 'sqrtn', 'sum' are supported. Using
combiner=None is undefined.
default_value: default value for Examples where the text feature is empty.
      Note, it's recommended to keep default_value consistent with the OOV
      tokens, in case there was special handling of OOV in the text module. If
      None, the text feature is assumed to be non-empty for each Example.
trainable: Whether or not the Module is trainable. False by default, meaning
the pre-trained weights are frozen. This is different from the ordinary
tf.feature_column.embedding_column(), but that one is intended for
training from scratch.
Returns:
`_DenseColumn` that converts from text input.
Raises:
ValueError: if module_spec is not suitable for use in this feature column.
ValueError: if combiner not in ('mean', 'sqrtn', 'sum').
"""
module_spec = module.as_module_spec(module_spec)
_check_module_is_text_embedding(module_spec)
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be 'mean', 'sqrtn' or 'sum': %r" % combiner)
return _SparseTextEmbeddingColumn(
key=key,
module_spec=module_spec,
trainable=trainable,
default_value=default_value,
combiner=combiner)
class _SparseTextEmbeddingColumn(
DenseFeatureColumn, # pylint: disable=protected-access
collections.namedtuple(
"_ModuleEmbeddingColumn",
("key", "combiner", "module_spec", "default_value", "trainable"))):
"""Returned by sparse_text_embedding_column(). Do not use directly."""
@property
def _is_v2_column(self):
return True
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
@property
def name(self):
"""Returns string. Used for variable_scope and naming."""
if not hasattr(self, "_name"):
key_name = self.key if isinstance(self.key, str) else self.key.name
self._name = "{}_hub_module_embedding".format(key_name)
return self._name
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`)."""
return inputs.get(self.key)
def transform_feature(self, transformation_cache, state_manager):
return transformation_cache.get(self.key, state_manager)
@property
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
return self.parse_example_spec
@property
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict."""
return {self.key: tf.compat.v1.VarLenFeature(tf.string)}
@property
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.variable_shape
@property
def variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
return self.module_spec.get_output_info_dict()["default"].get_shape()[1:]
def _get_dense_tensor_for_inputs(self, text_batch, trainable):
m = module.Module(self.module_spec, trainable=self.trainable and trainable)
if self.default_value is not None:
text_batch = tf.sparse.fill_empty_rows(text_batch, self.default_value)[0]
embedded_tokens = m(text_batch.values)
embedding_ids = tf.SparseTensor(
indices=text_batch.indices,
values=tf.range(tf.shape(text_batch.indices)[0], dtype=tf.int32),
dense_shape=text_batch.dense_shape)
return tf.nn.embedding_lookup_sparse(
params=embedded_tokens,
sp_ids=embedding_ids,
sp_weights=None,
combiner=self.combiner)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`."""
del weight_collections
text_batch = inputs.get(self)
return self._get_dense_tensor_for_inputs(text_batch, self.trainable and
trainable)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_inputs(input_tensor, self.trainable)
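# Sketch of the combiner mechanics used by _get_dense_tensor_for_inputs above:
# per-token embeddings are gathered through a SparseTensor of row ids and then
# reduced per example by tf.nn.embedding_lookup_sparse. Illustrative only; the
# numbers are made up.
def _example_combine_token_embeddings():
  embedded_tokens = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # one row per token
  embedding_ids = tf.SparseTensor(
      indices=[[0, 0], [0, 1]],
      values=tf.constant([0, 1], dtype=tf.int64),
      dense_shape=[1, 2])
  # combiner="mean" averages the two token embeddings into [[2.0, 3.0]].
  return tf.nn.embedding_lookup_sparse(
      params=embedded_tokens, sp_ids=embedding_ids, sp_weights=None,
      combiner="mean")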
|
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2012 Caffeinated Code <caffeinatedco.de>
# Copyright (C) 2012 George Czabania
# Copyright (C) 2012 Jono Cooper
# Copyright (C) 2012 Vadim Rutkovsky
# Copyright (c) The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
### END LICENSE
'''Enhances builder connections, provides object to access glade objects'''
from gi.repository import GObject, Gtk # pylint: disable=E0611
import inspect
import functools
import logging
logger = logging.getLogger('gnomeread_lib')
from xml.etree.ElementTree import ElementTree
# this module is big so uses some conventional prefixes and postfixes
# *s list, except self.widgets is a dictionary
# *_dict dictionary
# *name string
# ele_* element in a ElementTree
# pylint: disable=R0904
# the many public methods are a feature of Gtk.Builder
class Builder(Gtk.Builder):
''' extra features
connects glade defined handler to default_handler if necessary
auto connects widget to handler with matching name or alias
auto connects several widgets to a handler via multiple aliases
allow handlers to lookup widget name
logs every connection made, and any on_* not made
'''
def __init__(self):
Gtk.Builder.__init__(self)
self.widgets = {}
self.glade_handler_dict = {}
self.connections = []
self._reverse_widget_dict = {}
# pylint: disable=R0201
# this is a method so that a subclass of Builder can redefine it
def default_handler(self,
handler_name, filename, *args, **kwargs):
'''helps the apprentice guru
glade defined handlers that do not exist come here instead.
An apprentice guru might wonder which signal does what he wants,
now he can define any likely candidates in glade and notice which
ones get triggered when he plays with the project.
this method does not appear in Gtk.Builder'''
logger.debug('''tried to call non-existent function:%s()
expected in %s
args:%s
kwargs:%s''', handler_name, filename, args, kwargs)
# pylint: enable=R0201
def get_name(self, widget):
''' allows a handler to get the name (id) of a widget
this method does not appear in Gtk.Builder'''
return self._reverse_widget_dict.get(widget)
def add_from_file(self, filename):
'''parses xml file and stores wanted details'''
Gtk.Builder.add_from_file(self, filename)
# extract data for the extra interfaces
tree = ElementTree()
tree.parse(filename)
        ele_widgets = tree.iter("object")
for ele_widget in ele_widgets:
name = ele_widget.attrib['id']
widget = self.get_object(name)
# populate indexes - a dictionary of widgets
self.widgets[name] = widget
# populate a reversed dictionary
self._reverse_widget_dict[widget] = name
# populate connections list
ele_signals = ele_widget.findall("signal")
connections = [
(name,
ele_signal.attrib['name'],
ele_signal.attrib['handler']) for ele_signal in ele_signals]
if connections:
self.connections.extend(connections)
        ele_signals = tree.iter("signal")
for ele_signal in ele_signals:
self.glade_handler_dict.update(
{ele_signal.attrib["handler"]: None})
def connect_signals(self, callback_obj):
'''connect the handlers defined in glade
reports successful and failed connections
and logs call to missing handlers'''
filename = inspect.getfile(callback_obj.__class__)
callback_handler_dict = dict_from_callback_obj(callback_obj)
connection_dict = {}
connection_dict.update(self.glade_handler_dict)
connection_dict.update(callback_handler_dict)
for item in connection_dict.items():
if item[1] is None:
# the handler is missing so reroute to default_handler
handler = functools.partial(
self.default_handler, item[0], filename)
connection_dict[item[0]] = handler
# replace the run time warning
logger.warn("expected handler '%s' in %s",
item[0], filename)
        # connect glade-defined handlers
Gtk.Builder.connect_signals(self, connection_dict)
# let's tell the user how we applied the glade design
for connection in self.connections:
widget_name, signal_name, handler_name = connection
logger.debug("connect builder by design '%s', '%s', '%s'",
widget_name, signal_name, handler_name)
def get_ui(self, callback_obj=None, by_name=True):
'''Creates the ui object with widgets as attributes
connects signals by 2 methods
this method does not appear in Gtk.Builder'''
result = UiFactory(self.widgets)
# Hook up any signals the user defined in glade
if callback_obj is not None:
            # connect glade-defined handlers
self.connect_signals(callback_obj)
if by_name:
auto_connect_by_name(callback_obj, self)
return result
# pylint: disable=R0903
# this class deliberately does not provide any public interfaces
# apart from the glade widgets
class UiFactory():
''' provides an object with attributes as glade widgets'''
def __init__(self, widget_dict):
self._widget_dict = widget_dict
for (widget_name, widget) in widget_dict.items():
setattr(self, widget_name, widget)
# Mangle any non-usable names (like with spaces or dashes)
# into pythonic ones
cannot_message = """cannot bind ui.%s, name already exists
consider using a pythonic name instead of design name '%s'"""
consider_message = """consider using a pythonic name instead of design name '%s'"""
for (widget_name, widget) in widget_dict.items():
pyname = make_pyname(widget_name)
if pyname != widget_name:
if hasattr(self, pyname):
logger.debug(cannot_message, pyname, widget_name)
else:
logger.debug(consider_message, widget_name)
setattr(self, pyname, widget)
def iterator():
'''Support 'for o in self' '''
return iter(widget_dict.values())
setattr(self, '__iter__', iterator)
def __getitem__(self, name):
'access as dictionary where name might be non-pythonic'
return self._widget_dict[name]
# pylint: enable=R0903
def make_pyname(name):
''' mangles non-pythonic names into pythonic ones'''
pyname = ''
for character in name:
if (character.isalpha() or character == '_' or
(pyname and character.isdigit())):
pyname += character
else:
pyname += '_'
return pyname
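# e.g. make_pyname('my-button 2') returns 'my_button_2'; a leading digit would
# also be replaced with '_' because pyname is still empty at that point.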
# Until bug https://bugzilla.gnome.org/show_bug.cgi?id=652127 is fixed, we
# need to reimplement inspect.getmembers. GObject introspection doesn't
# play nice with it.
def getmembers(obj, check):
members = []
for k in dir(obj):
try:
attr = getattr(obj, k)
        except Exception:
continue
if check(attr):
members.append((k, attr))
members.sort()
return members
def dict_from_callback_obj(callback_obj):
'''a dictionary interface to callback_obj'''
methods = getmembers(callback_obj, inspect.ismethod)
aliased_methods = [x[1] for x in methods if hasattr(x[1], 'aliases')]
# a method may have several aliases
#~ @alias('on_btn_foo_clicked')
#~ @alias('on_tool_foo_activate')
#~ on_menu_foo_activate():
#~ pass
alias_groups = [(x.aliases, x) for x in aliased_methods]
aliases = []
for item in alias_groups:
for alias in item[0]:
aliases.append((alias, item[1]))
dict_methods = dict(methods)
dict_aliases = dict(aliases)
results = {}
results.update(dict_methods)
results.update(dict_aliases)
return results
def auto_connect_by_name(callback_obj, builder):
'''finds handlers like on_<widget_name>_<signal> and connects them
i.e. find widget,signal pair in builder and call
widget.connect(signal, on_<widget_name>_<signal>)'''
callback_handler_dict = dict_from_callback_obj(callback_obj)
for item in builder.widgets.items():
(widget_name, widget) = item
signal_ids = []
try:
widget_type = type(widget)
while widget_type:
signal_ids.extend(GObject.signal_list_ids(widget_type))
widget_type = GObject.type_parent(widget_type)
except RuntimeError: # pylint wants a specific error
pass
signal_names = [GObject.signal_name(sid) for sid in signal_ids]
        # Now, automatically find any handlers the user didn't specify in glade
for sig in signal_names:
# using convention suggested by glade
sig = sig.replace("-", "_")
handler_names = ["on_%s_%s" % (widget_name, sig)]
# Using the convention that the top level window is not
# specified in the handler name. That is use
# on_destroy() instead of on_windowname_destroy()
if widget is callback_obj:
handler_names.append("on_%s" % sig)
do_connect(item, sig, handler_names,
callback_handler_dict, builder.connections)
log_unconnected_functions(callback_handler_dict, builder.connections)
def do_connect(item, signal_name, handler_names,
callback_handler_dict, connections):
'''connect this signal to an unused handler'''
widget_name, widget = item
for handler_name in handler_names:
target = handler_name in callback_handler_dict.keys()
connection = (widget_name, signal_name, handler_name)
duplicate = connection in connections
if target and not duplicate:
widget.connect(signal_name, callback_handler_dict[handler_name])
connections.append(connection)
logger.debug("connect builder by name '%s','%s', '%s'",
widget_name, signal_name, handler_name)
def log_unconnected_functions(callback_handler_dict, connections):
'''log functions like on_* that we could not connect'''
connected_functions = [x[2] for x in connections]
handler_names = callback_handler_dict.keys()
unconnected = [x for x in handler_names if x.startswith('on_')]
for handler_name in connected_functions:
try:
unconnected.remove(handler_name)
except ValueError:
pass
for handler_name in unconnected:
logger.debug("Not connected to builder '%s'", handler_name)
|
|
from __future__ import absolute_import
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings, requires_tz_support
from django.utils import timezone
from .models import Book, BookSigning
def _make_books(n, base_date):
for i in range(n):
b = Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100+i,
pubdate=base_date - datetime.timedelta(days=i))
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertFalse('latest' in res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month,day in ((9,1), (10,2), (11,3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,10,1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010,9,1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
urls = 'generic_views.urls'
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T00:00:00+03:00 (beginning of day) > 2008-04-01T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-03T00:00:00+03:00 (end of day) > 2008-04-02T22:00:00+00:00 (book signing event date)
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
|
|
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
import inspect
import textwrap
from sympy.core.compatibility import (exec_, is_sequence, iterable,
NotIterable, string_types, range, builtins)
from sympy.utilities.decorator import doctest_depends_on
# These are the namespaces the lambda functions will use.
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
NUMEXPR = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
NUMEXPR_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
"ceiling": "ceil",
"E": "e",
"ln": "log",
}
MPMATH_TRANSLATIONS = {
"Abs": "fabs",
"elliptic_k": "ellipk",
"elliptic_f": "ellipf",
"elliptic_e": "ellipe",
"elliptic_pi": "ellippi",
"ceiling": "ceil",
"chebyshevt": "chebyt",
"chebyshevu": "chebyu",
"E": "e",
"I": "j",
"ln": "log",
#"lowergamma":"lower_gamma",
"oo": "inf",
#"uppergamma":"upper_gamma",
"LambertW": "lambertw",
"MutableDenseMatrix": "matrix",
"ImmutableMatrix": "matrix",
"conjugate": "conj",
"dirichlet_eta": "altzeta",
"Ei": "ei",
"Shi": "shi",
"Chi": "chi",
"Si": "si",
"Ci": "ci"
}
NUMPY_TRANSLATIONS = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"E": "e",
"im": "imag",
"ln": "log",
"Mod": "mod",
"oo": "inf",
"re": "real",
"SparseMatrix": "array",
"ImmutableSparseMatrix": "array",
"Matrix": "array",
"MutableDenseMatrix": "array",
"ImmutableMatrix": "array",
"ImmutableDenseMatrix": "array",
}
NUMEXPR_TRANSLATIONS = {}
# Available modules:
MODULES = {
"math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
"mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from mpmath import *",)),
"numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
"sympy": (SYMPY, SYMPY_DEFAULT, {}, (
"from sympy.functions import *",
"from sympy.matrices import *",
"from sympy import Integral, pi, oo, nan, zoo, E, I",)),
"numexpr" : (NUMEXPR, NUMEXPR_DEFAULT, NUMEXPR_TRANSLATIONS,
("import_module('numexpr')", )),
}
def _import(module, reload=False):
    """
    Creates a global translation dictionary for module.
    The argument module has to be one of the following strings: "math",
    "mpmath", "numpy", "sympy" or "numexpr".
    These dictionaries map names of sympy functions to their equivalents in
    other modules.
    """
from sympy.external import import_module
try:
namespace, namespace_default, translations, import_commands = MODULES[
module]
except KeyError:
raise NameError(
"'%s' module can't be used for lambdification" % module)
# Clear namespace or exit
if namespace != namespace_default:
# The namespace was already generated, don't do it again if not forced.
if reload:
namespace.clear()
namespace.update(namespace_default)
else:
return
for import_command in import_commands:
if import_command.startswith('import_module'):
module = eval(import_command)
if module is not None:
namespace.update(module.__dict__)
continue
else:
try:
exec_(import_command, {}, namespace)
continue
except ImportError:
pass
raise ImportError(
"can't import '%s' with '%s' command" % (module, import_command))
# Add translated names to namespace
for sympyname, translation in translations.items():
namespace[sympyname] = namespace[translation]
# For computing the modulus of a sympy expression we use the builtin abs
# function, instead of the previously used fabs function for all
# translation modules. This is because the fabs function in the math
# module does not accept complex valued arguments. (see issue 9474). The
# only exception, where we don't use the builtin abs function is the
# mpmath translation module, because mpmath.fabs returns mpf objects in
# contrast to abs().
if 'Abs' not in namespace:
namespace['Abs'] = abs
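# For example, after _import("math") the MATH namespace maps the sympy-style
# name "ceiling" to math.ceil and "Abs" to the builtin abs, so strings
# generated by lambdastr() can use sympy names directly.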
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
dummify=True):
"""
Returns a lambda function for fast calculation of numerical values.
If not specified differently by the user, SymPy functions are replaced as
far as possible by either python-math, numpy (if available) or mpmath
functions - exactly in this order. To change this behavior, the "modules"
argument can be used. It accepts:
- the strings "math", "mpmath", "numpy", "numexpr", "sympy"
- any modules (e.g. math)
- dictionaries that map names of sympy functions to arbitrary functions
- lists that contain a mix of the arguments above, with higher priority
given to entries appearing first.
The default behavior is to substitute all arguments in the provided
expression with dummy symbols. This allows for applied functions (e.g.
f(t)) to be supplied as arguments. Call the function with dummify=False if
dummy substitution is unwanted (and `args` is not a string). If you want
to view the lambdified function or provide "sympy" as the module, you
should probably set dummify=False.
For functions involving large array calculations, numexpr can provide a
significant speedup over numpy. Please note that the available functions
for numexpr are more limited than numpy but can be expanded with
implemented_function and user defined subclasses of Function. If specified,
numexpr may be the only option in modules. The official list of numexpr
functions can be found at:
https://github.com/pydata/numexpr#supported-functions
In previous releases ``lambdify`` replaced ``Matrix`` with ``numpy.matrix``
by default. As of release 1.0 ``numpy.array`` is the default.
To get the old default behavior you must pass in ``[{'ImmutableMatrix':
numpy.matrix}, 'numpy']`` to the ``modules`` kwarg.
>>> from sympy import lambdify, Matrix
>>> from sympy.abc import x, y
>>> import numpy
>>> array2mat = [{'ImmutableMatrix': numpy.matrix}, 'numpy']
>>> f = lambdify((x, y), Matrix([x, y]), modules=array2mat)
>>> f(1, 2)
matrix([[1],
[2]])
Usage
=====
(1) Use one of the provided modules:
>>> from sympy import sin, tan, gamma
>>> from sympy.utilities.lambdify import lambdastr
>>> from sympy.abc import x, y
>>> f = lambdify(x, sin(x), "math")
Attention: Functions that are not in the math module will throw a name
error when the lambda function is evaluated! So this would
be better:
>>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))
(2) Use some other module:
>>> import numpy
>>> f = lambdify((x,y), tan(x*y), numpy)
Attention: There are naming differences between numpy and sympy. So if
you simply take the numpy module, e.g. sympy.atan will not be
translated to numpy.arctan. Use the modified module instead
by passing the string "numpy":
>>> f = lambdify((x,y), tan(x*y), "numpy")
>>> f(1, 2)
-2.18503986326
>>> from numpy import array
>>> f(array([1, 2, 3]), array([2, 3, 5]))
[-2.18503986 -0.29100619 -0.8559934 ]
(3) Use a dictionary defining custom functions:
>>> def my_cool_function(x): return 'sin(%s) is cool' % x
>>> myfuncs = {"sin" : my_cool_function}
>>> f = lambdify(x, sin(x), myfuncs); f(1)
'sin(1) is cool'
Examples
========
>>> from sympy.utilities.lambdify import implemented_function
>>> from sympy import sqrt, sin, Matrix
>>> from sympy import Function
>>> from sympy.abc import w, x, y, z
>>> f = lambdify(x, x**2)
>>> f(2)
4
>>> f = lambdify((x, y, z), [z, y, x])
>>> f(1,2,3)
[3, 2, 1]
>>> f = lambdify(x, sqrt(x))
>>> f(4)
2.0
>>> f = lambdify((x, y), sin(x*y)**2)
>>> f(0, 5)
0.0
>>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
>>> row(1, 2)
Matrix([[1, 3]])
Tuple arguments are handled and the lambdified function should
be called with the same type of arguments as were used to create
    the function:
>>> f = lambdify((x, (y, z)), x + y)
>>> f(1, (2, 4))
3
A more robust way of handling this is to always work with flattened
arguments:
>>> from sympy.utilities.iterables import flatten
>>> args = w, (x, (y, z))
>>> vals = 1, (2, (3, 4))
>>> f = lambdify(flatten(args), w + x + y + z)
>>> f(*flatten(vals))
10
Functions present in `expr` can also carry their own numerical
implementations, in a callable attached to the ``_imp_``
attribute. Usually you attach this using the
``implemented_function`` factory:
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> func = lambdify(x, f(x))
>>> func(4)
5
``lambdify`` always prefers ``_imp_`` implementations to implementations
in other namespaces, unless the ``use_imps`` input parameter is False.
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import flatten
# If the user hasn't specified any modules, use what is available.
module_provided = True
if modules is None:
module_provided = False
# Use either numpy (if available) or python.math where possible.
# XXX: This leads to different behaviour on different systems and
# might be the reason for irreproducible errors.
modules = ["math", "mpmath", "sympy"]
#Attempt to import numpy
try:
_import("numpy")
except ImportError:
pass
else:
modules.insert(1, "numpy")
# Get the needed namespaces.
namespaces = []
# First find any function implementations
if use_imps:
namespaces.append(_imp_namespace(expr))
# Check for dict before iterating
if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
namespaces.append(modules)
else:
# consistency check
if _module_present('numexpr', modules) and len(modules) > 1:
raise TypeError("numexpr must be the only item in 'modules'")
namespaces += list(modules)
# fill namespace with first having highest priority
namespace = {}
for m in namespaces[::-1]:
buf = _get_namespace(m)
namespace.update(buf)
if hasattr(expr, "atoms"):
        # Try to extract symbols from the expression.
        # Move on if expr.atoms is not implemented.
syms = expr.atoms(Symbol)
for term in syms:
namespace.update({str(term): term})
if _module_present('numpy',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumPyPrinter as printer
if _module_present('numexpr',namespaces) and printer is None:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import NumExprPrinter as printer
# Get the names of the args, for creating a docstring
if not iterable(args):
args = (args,)
names = []
# Grab the callers frame, for getting the names by inspection (if needed)
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
for n, var in enumerate(args):
if hasattr(var, 'name'):
names.append(var.name)
else:
# It's an iterable. Try to get name by inspection of calling frame.
name_list = [var_name for var_name, var_val in callers_local_vars
if var_val is var]
if len(name_list) == 1:
names.append(name_list[0])
else:
# Cannot infer name with certainty. arg_# will have to do.
names.append('arg_' + str(n))
# Create lambda function.
lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
if flat in lstr:
namespace.update({flat: flatten})
# Provide lambda expression with builtins, and compatible implementation of range
namespace.update({'builtins':builtins, 'range':range})
func = eval(lstr, namespace)
# For numpy lambdify, wrap all input arguments in arrays.
# This is a fix for gh-11306.
if module_provided and _module_present('numpy',namespaces):
def array_wrap(funcarg):
def wrapper(*argsx, **kwargsx):
return funcarg(*[namespace['asarray'](i) for i in argsx], **kwargsx)
return wrapper
func = array_wrap(func)
# Apply the docstring
sig = "func({0})".format(", ".join(str(i) for i in names))
sig = textwrap.fill(sig, subsequent_indent=' '*8)
expr_str = str(expr)
if len(expr_str) > 78:
expr_str = textwrap.wrap(expr_str, 75)[0] + '...'
func.__doc__ = ("Created with lambdify. Signature:\n\n{sig}\n\n"
"Expression:\n\n{expr}").format(sig=sig, expr=expr_str)
return func
def _module_present(modname, modlist):
if modname in modlist:
return True
for m in modlist:
if hasattr(m, '__name__') and m.__name__ == modname:
return True
return False
def _get_namespace(m):
"""
This is used by _lambdify to parse its arguments.
"""
if isinstance(m, str):
_import(m)
return MODULES[m][0]
elif isinstance(m, dict):
return m
elif hasattr(m, "__dict__"):
return m.__dict__
else:
raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
"""
Returns a string that can be evaluated to a lambda function.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.lambdify import lambdastr
>>> lambdastr(x, x**2)
'lambda x: (x**2)'
>>> lambdastr((x,y,z), [z,y,x])
'lambda x,y,z: ([z, y, x])'
Although tuples may not appear as arguments to lambda in Python 3,
lambdastr will create a lambda function that will unpack the original
arguments so that nested arguments can be handled:
>>> lambdastr((x, (y, z)), x + y)
'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
"""
# Transforming everything to strings.
from sympy.matrices import DeferredVector
from sympy import Dummy, sympify, Symbol, Function, flatten
if printer is not None:
if inspect.isfunction(printer):
lambdarepr = printer
else:
if inspect.isclass(printer):
lambdarepr = lambda expr: printer().doprint(expr)
else:
lambdarepr = lambda expr: printer.doprint(expr)
else:
#XXX: This has to be done here because of circular imports
from sympy.printing.lambdarepr import lambdarepr
def sub_args(args, dummies_dict):
if isinstance(args, str):
return args
elif isinstance(args, DeferredVector):
return str(args)
elif iterable(args):
dummies = flatten([sub_args(a, dummies_dict) for a in args])
return ",".join(str(a) for a in dummies)
else:
#Sub in dummy variables for functions or symbols
if isinstance(args, (Function, Symbol)):
dummies = Dummy()
dummies_dict.update({args : dummies})
return str(dummies)
else:
return str(args)
def sub_expr(expr, dummies_dict):
try:
expr = sympify(expr).xreplace(dummies_dict)
except Exception:
if isinstance(expr, DeferredVector):
pass
elif isinstance(expr, dict):
k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
expr = dict(zip(k, v))
elif isinstance(expr, tuple):
expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
elif isinstance(expr, list):
expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
return expr
# Transform args
def isiter(l):
return iterable(l, exclude=(str, DeferredVector, NotIterable))
if isiter(args) and any(isiter(i) for i in args):
from sympy.utilities.iterables import flatten
import re
dum_args = [str(Dummy(str(i))) for i in range(len(args))]
iter_args = ','.join([i if isiter(a) else i
for i, a in zip(dum_args, args)])
lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
flat = '__flatten_args__'
rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
','.join(dum_args), lstr, flat, iter_args)
if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
raise ValueError('the name %s is reserved by lambdastr' % flat)
return rv
dummies_dict = {}
if dummify:
args = sub_args(args, dummies_dict)
else:
if isinstance(args, str):
pass
elif iterable(args, exclude=DeferredVector):
args = ",".join(str(a) for a in args)
# Transform expr
if dummify:
if isinstance(expr, str):
pass
else:
expr = sub_expr(expr, dummies_dict)
expr = lambdarepr(expr)
return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
""" Return namespace dict with function implementations
We need to search for functions in anything that can be thrown at
us - that is - anything that could be passed as `expr`. Examples
include sympy expressions, as well as tuples, lists and dicts that may
contain sympy expressions.
Parameters
----------
expr : object
Something passed to lambdify, that will generate valid code from
``str(expr)``.
namespace : None or mapping
Namespace to fill. None results in new empty dict
Returns
-------
namespace : dict
dict with keys of implemented function names within `expr` and
corresponding values being the numerical implementation of
function
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> g = implemented_function(Function('g'), lambda x: x*10)
>>> namespace = _imp_namespace(f(g(x)))
>>> sorted(namespace.keys())
['f', 'g']
"""
# Delayed import to avoid circular imports
from sympy.core.function import FunctionClass
if namespace is None:
namespace = {}
# tuples, lists, dicts are valid expressions
if is_sequence(expr):
for arg in expr:
_imp_namespace(arg, namespace)
return namespace
elif isinstance(expr, dict):
for key, val in expr.items():
# functions can be in dictionary keys
_imp_namespace(key, namespace)
_imp_namespace(val, namespace)
return namespace
# sympy expressions may be Functions themselves
func = getattr(expr, 'func', None)
if isinstance(func, FunctionClass):
imp = getattr(func, '_imp_', None)
if imp is not None:
name = expr.func.__name__
if name in namespace and namespace[name] != imp:
raise ValueError('We found more than one '
'implementation with name '
'"%s"' % name)
namespace[name] = imp
# and / or they may take Functions as arguments
if hasattr(expr, 'args'):
for arg in expr.args:
_imp_namespace(arg, namespace)
return namespace
def implemented_function(symfunc, implementation):
""" Add numerical ``implementation`` to function ``symfunc``.
``symfunc`` can be an ``UndefinedFunction`` instance, or a name string.
In the latter case we create an ``UndefinedFunction`` instance with that
name.
Be aware that this is a quick workaround, not a general method to create
special symbolic functions. If you want to create a symbolic function to be
used by all the machinery of SymPy you should subclass the ``Function``
class.
Parameters
----------
symfunc : ``str`` or ``UndefinedFunction`` instance
If ``str``, then create new ``UndefinedFunction`` with this as
name. If `symfunc` is a sympy function, attach implementation to it.
implementation : callable
numerical implementation to be called by ``evalf()`` or ``lambdify``
Returns
-------
afunc : sympy.FunctionClass instance
function with attached implementation
Examples
========
>>> from sympy.abc import x
>>> from sympy.utilities.lambdify import lambdify, implemented_function
>>> from sympy import Function
>>> f = implemented_function(Function('f'), lambda x: x+1)
>>> lam_f = lambdify(x, f(x))
>>> lam_f(4)
5
"""
# Delayed import to avoid circular imports
from sympy.core.function import UndefinedFunction
# if name, create function to hold implementation
if isinstance(symfunc, string_types):
symfunc = UndefinedFunction(symfunc)
elif not isinstance(symfunc, UndefinedFunction):
raise ValueError('symfunc should be either a string or'
' an UndefinedFunction instance.')
# We need to attach as a method because symfunc will be a class
symfunc._imp_ = staticmethod(implementation)
return symfunc
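# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original module):
# a minimal check that implemented_function() and lambdify() cooperate, using
# only the public sympy API. It is guarded so importing this module stays
# side-effect free; it assumes a working sympy installation.
if __name__ == '__main__':  # pragma: no cover
    from sympy import Function, Symbol
    _x = Symbol('x')
    _f = implemented_function(Function('f'), lambda v: v + 1)
    _lam_f = lambdify(_x, _f(_x))
    assert _lam_f(4) == 5  # the attached numerical implementation is used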
|
|
#!/usr/bin/python
# coding=utf8
# Copyright 2013 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FieldDB Client --- functionality for connecting to FieldDB web services.
This module may be useful for developers or for people just wanting to get their
FieldDB data into Python. It's also good for understanding how to use the FieldDB
and CouchDB APIs.
"""
import requests
import pprint
import simplejson as json
import uuid
import copy
import optparse
# For logging HTTP requests & responses
import logging
try:
import http.client as http_client
except ImportError:
import httplib as http_client # Python 2
# Stop the Certificate warnings with `verify=False`
requests.packages.urllib3.disable_warnings()
p = pprint.pprint
def verbose():
"""Call this to spit the HTTP requests/responses to stdout.
From http://stackoverflow.com/questions/10588644/how-can-i-see-the-entire-http-request-thats-being-sent-by-my-python-application
I don't know how it works or how to turn it off once it's called ...
"""
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
class FieldDBClient(object):
"""Create a FieldDB instance to connect to live FieldDB web services.
Basically this is just some FieldDB-specific conveniences wrapped around
a Python `requests.Session` instance.
"""
def __init__(self, options):
"""Options is a dict of options, or a string path to a JSON object/dict.
"""
if type(options) is str:
options = json.load(open(options, 'rb'))
self._process_options(options)
self.session = requests.Session()
self.session.verify = False # https without certificates, wild!
self.session.headers.update({'Content-Type': 'application/json'})
def _process_options(self, options):
self.auth_protocol = options.get('auth_protocol', 'https')
self.auth_host = options.get('auth_host', 'localhost')
self.auth_port = options.get('auth_port', '')
# self.auth_port = options.get('auth_port', '3183')
self.corpus_protocol = options.get('corpus_protocol', 'http')
self.corpus_host = options.get('corpus_host', '127.0.0.1')
        self.corpus_port = options.get('corpus_port', '')
        # self.corpus_port = options.get('corpus_port', '9292')
self.couch_protocol = options.get('couch_protocol', 'http')
self.couch_host = options.get('couch_host', 'localhost')
        self.couch_port = options.get('couch_port', '')
        # self.couch_port = options.get('couch_port', '5984')
self.username = options.get('username', 'someusername')
self.password = options.get('password', 'somesecret')
self.admin_username = options.get('admin_username', 'someusername')
self.admin_password = options.get('admin_password', 'somesecret')
self.server_code = options.get('server_code', 'local')
self.app_version_when_created = options.get('app_version_when_created',
'unknown')
# URL getters
############################################################################
def _get_url(self, protocol, host, port):
return '%s://%s:%s' % (protocol, host, port)
def _get_url_cred(self, protocol, host, port):
return '%s://%s:%s@%s:%s' % (protocol, self.username, self.password,
host, port)
def get_auth_url(self):
return self._get_url(self.auth_protocol, self.auth_host, self.auth_port)
def get_corpus_url(self):
return self._get_url(self.corpus_protocol, self.corpus_host,
self.corpus_port)
def get_couch_url(self):
return self._get_url(self.couch_protocol, self.couch_host,
self.couch_port)
def get_auth_url_cred(self):
return self._get_url_cred(self.auth_protocol, self.auth_host,
self.auth_port)
def get_corpus_url_cred(self):
return self._get_url_cred(self.corpus_protocol, self.corpus_host,
self.corpus_port)
# General methods
############################################################################
def get_uuid(self):
return uuid.uuid4().hex
# Authentication Web Service
############################################################################
#
# Here is the API (see AuthenticationWebService/service.js):
#
# POST /login (attempt a login)
# POST /register (create a new user)
# POST /newcorpus
# POST /changepassword
# POST /corpusteam (list of team members on a corpus)
# POST /addroletouser
# POST /updateroles
def login(self):
"""Login to the FieldDB Authentication web service.
"""
response = self.session.post(
'%s/login' % self.get_auth_url(),
data=json.dumps({
'username': self.username,
'password': self.password}))
rjson = response.json()
if rjson.has_key('user'):
self.user = rjson['user']
self.cookies = response.cookies
return rjson
def register(self, username, password, email):
"""Register a new user via the FieldDB Authentication web service.
        .. note::
It is not clear to me if the appVersionWhenCreated param is
important.
"""
return self.session.post(
'%s/register' % self.get_auth_url(),
data=json.dumps({
'username': username,
'password': password,
'email': email,
'serverCode': self.server_code,
'authUrl': self.get_auth_url(),
'appVersionWhenCreated': self.app_version_when_created
})).json()
def new_corpus(self, new_corpus_name):
"""Create a new FieldDB corpus via the FieldDB Authentication web
service.
POST /newcorpus with a JSON object payload.
If successful, `response['corpusadded'] is True`
        If unsuccessful, the response still reports `corpusadded` but includes
        `userFriendlyErrors`, e.g.:
{u'corpusadded': True,
u'info': [u'User details saved.'],
u'userFriendlyErrors': [u'There was an error creating your corpus.
Blackfoot']}
"""
return self.session.post(
'%s/newcorpus' % self.get_auth_url(),
data=json.dumps({
'newCorpusName': new_corpus_name,
'username': self.username,
'password': self.password,
'serverCode': self.server_code,
'authUrl': self.get_auth_url(),
'appVersionWhenCreated': self.app_version_when_created
})).json()
# Corpus Web Service
############################################################################
# Direct CouchDB Requests
############################################################################
def login_couchdb(self):
"""Login via the CouchDB HTTP API using the admin user.
"""
return self.session.post(
'%s/_session' % self.get_couch_url(),
data=json.dumps({
'name': self.admin_username,
'password': self.admin_password})).json()
def get_greeting(self):
return self.session.get(self.get_couch_url()).json()
def get_database_list(self):
url = '%s/_all_dbs' % self.get_couch_url()
return self.session.get(url).json()
def get_users(self):
return self.get_all_docs_list('zfielddbuserscouch')
def get__users(self):
return self.get_all_docs_list('_users')
def get_usernames(self):
"""Use the CouchDB API to get the usernames of the user documents in
zfielddbuserscouch.
"""
return [u['doc']['username'] for u
in self.get_users()['rows']
if u['doc'].has_key('username')]
def get__usernames(self):
"""Use the CouchDB API to get the usernames of the user documents in
_users.
.. note::
This assumes that the id is something like
'org.couchdb.user:username'.
"""
return [u['doc']['_id'].split(':')[1] for u in
self.get__users()['rows']
if u['doc'].get('type') == 'user']
def delete_user_and_corpora(self, username):
"""Use the CouchDB API to delete a FieldDB user.
        .. warning::
This involves deleting the user's documents from both of the users
databases as well as deleting the users corpora and activity feed
databases. I do not know if it should involve the deletion of other
data as well.
        .. warning::
This is just for testing. If you delete a database (=corpus) that
another user has access to and you don't alter that other user's
roles accordingly, the database will be in an inconsistent state.
"""
_users_db = '_users'
users_db = 'zfielddbuserscouch'
dbs_to_delete = [db_name for db_name in self.get_database_list() if
db_name.startswith('%s-' % username)]
for db in dbs_to_delete:
delete_db_resp = self.delete_database(db)
if delete_db_resp.get('ok') is True:
print '... Deleted database "%s".' % db
user = self.get_document(users_db, username)
user_id = user.get('_id')
user_rev = user.get('_rev')
_user = self.get_document(_users_db, 'org.couchdb.user:%s' % username)
_user_id = _user.get('_id')
_user_rev = _user.get('_rev')
if user_id:
r = self.delete_document(users_db, user_id, user_rev)
if r.get('ok') is True:
print '... Deleted user "%s".' % user_id
if _user_id:
r = self.delete_document(_users_db, _user_id, _user_rev)
if r.get('ok') is True:
print '... Deleted user "%s".' % _user_id
def create_database(self, database_name):
"""Only CouchDB admins can create databases.
"""
url = '%s/%s' % (self.get_couch_url(), database_name)
return self.session.put(url).json()
def delete_database(self, database_name):
#url = '%s/%s' % (self.get_couch_url_cred(), database_name)
url = '%s/%s' % (self.get_couch_url(), database_name)
return self.session.delete(url).json()
def replicate_database(self, source_name, target_name):
url = '%s/_replicate' % self.get_couch_url()
payload=json.dumps({
'source': source_name,
'target': target_name,
'create_target': True})
return self.session.post(
url,
data=payload,
headers={'content-type': 'application/json'}).json()
# Documents
############################################################################
def create_document(self, database_name, document):
document = json.dumps(document)
url = '%s/%s' % (self.get_couch_url(), database_name)
return self.session.post(
url,
data=document,
headers = {'content-type': 'application/json'}).json()
def get_document(self, database_name, document_id):
url = '%s/%s/%s' % (self.get_couch_url(), database_name, document_id)
return self.session.get(url).json()
def get_all_docs_list(self, database_name):
url = '%s/%s/_all_docs' % (self.get_couch_url(), database_name)
return self.session.get(url, params={'include_docs': 'true'}).json()
def update_document(self, database_name, document_id, document_rev,
new_document):
url = '%s/%s/%s' % (self.get_couch_url(), database_name, document_id)
new_document['_rev'] = document_rev
return self.session.put(url,
data=json.dumps(new_document),
headers = {'content-type': 'application/json'}).json()
def delete_document(self, database_name, document_id, document_rev):
url = '%s/%s/%s?rev=%s' % (self.get_couch_url(), database_name,
document_id, document_rev)
return self.session.delete(url).json()
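# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original client):
# shows the shape of the options dict FieldDBClient expects and how the URL
# getters combine protocol/host/port. The host, port and credentials below
# are hypothetical placeholders, not real FieldDB servers; nothing here makes
# a network request.
def _example_client_usage():
    options = {
        'auth_protocol': 'https',
        'auth_host': 'auth.example.org',  # hypothetical host
        'auth_port': '3183',
        'couch_protocol': 'http',
        'couch_host': 'localhost',
        'couch_port': '5984',
        'username': 'someusername',
        'password': 'somesecret',
    }
    client = FieldDBClient(options)
    return client.get_auth_url()  # 'https://auth.example.org:3183'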
class FieldDBClientTester(object):
"""Class with a `test` method that has a bunch of `assert` statements that
make sure that a FieldDBClient instance is behaving as we expect it to. Most
of this is straight out of http://guide.couchdb.org/
Usage::
>>> tester = FieldDBClientTester(fielddb_client)
>>> tester.test()
"""
def __init__(self, fielddb_instance, database_name='fruits',
database_clone_name='fruits_clone'):
self.fielddb = fielddb_instance
self.database_name = database_name
self.database_clone_name = database_clone_name
fruits = {
"orange": {
"item" : "orange",
"prices" : {
"Fresh Mart" : 1.99,
"Price Max" : 3.19,
"Citrus Circus" : 1.09
}
},
"apple": {
"item" : "apple",
"prices" : {
"Fresh Mart" : 1.59,
"Price Max" : 5.99,
"Apples Express" : 0.79
}
},
"banana": {
"item" : "banana",
"prices" : {
"Fresh Mart" : 1.99,
"Price Max" : 0.79,
"Banana Montana" : 4.22
}
}
}
def clean_up_couch(self):
"""Clean up the couch by deleting the databases we've created.
"""
database_list = self.fielddb.get_database_list()
if self.database_name in database_list:
self.fielddb.delete_database(self.database_name)
print '... Deleted database "%s".' % self.database_name
if self.database_clone_name in database_list:
self.fielddb.delete_database(self.database_clone_name)
print '... Deleted database "%s".' % self.database_clone_name
def test(self):
"""Run some tests by making requests to the Auth service, the Corpus
service, and the CouchDB API and verifying that these behave as
expected. The tests are just simple `assert` statements.
"""
user_to_add_username = 'devlocal'
user_to_add_password = 'devlocal'
user_to_add_email = '[email protected]'
temporary_user_username = 'temporary'
temporary_user_password = 'temporary'
temporary_user_email = '[email protected]'
print '\nTesting the FieldDB client.'
# Clean Up.
self.clean_up_couch()
# Login to the Authentication web service.
login_resp = self.fielddb.login()
assert login_resp.has_key('user')
assert login_resp['user']['username'] == self.fielddb.username
print '... Logged in to Authentication web service as "%s".' % \
self.fielddb.username
# Login to CouchDB with the admin account.
couchdb_login_resp = self.fielddb.login_couchdb()
assert couchdb_login_resp['ok'] is True
print '... Logged in to CouchDB as "%s".' % self.fielddb.admin_username
# Get users.
users_list = self.fielddb.get_usernames()
assert type(users_list) == type([])
print '... Got users list.'
# Create devlocal user if it doesn't exist.
if user_to_add_username not in users_list:
self.fielddb.register(user_to_add_username, user_to_add_password,
user_to_add_email)
print '... Registered user "%s".' % user_to_add_username
else:
print '... User "%s" is already registered.' % user_to_add_username
# Create temporary user.
register_request = self.fielddb.register(temporary_user_username,
temporary_user_password, temporary_user_email)
if register_request.get('userFriendlyErrors'):
print '... User "%s" is already registered.' % temporary_user_username
else:
print '... Registered user "%s".' % temporary_user_username
# Delete temporary user.
self.fielddb.delete_user_and_corpora(temporary_user_username)
print '... Deleted user "%s" and its corpora/databases.' % \
temporary_user_username
# Get the CouchDB greeting.
greeting = self.fielddb.get_greeting()
assert greeting.has_key('couchdb')
print '... Got CouchDB greeting.'
# Get the database list.
database_list = self.fielddb.get_database_list()
assert type(database_list) is type([])
print '... Got database list.'
# Create a FieldDB corpus via the auth service
new_corpus_name = 'Blackfoot'
r = self.fielddb.new_corpus(new_corpus_name)
if r.get('userFriendlyErrors'):
print '... Corpus "%s" already exists.' % new_corpus_name
else:
print '... Corpus "%s" created.' % new_corpus_name
# Create a CouchDB database.
if self.database_name not in database_list:
create_response = self.fielddb.create_database(self.database_name)
try:
assert create_response['ok'] is True
except:
pprint.pprint(create_response)
print '... Created database "%s".' % self.database_name
else:
print '... Database "%s" already exists.' % self.database_name
# Create documents.
apple_create_response = self.fielddb.create_document(self.database_name,
self.fruits['apple'])
orange_create_response = self.fielddb.create_document(self.database_name,
self.fruits['orange'])
banana_create_response = self.fielddb.create_document(self.database_name,
self.fruits['banana'])
apple_id = apple_create_response['id']
        orange_id = orange_create_response['id']
        banana_id = banana_create_response['id']
assert apple_create_response['ok'] is True
assert orange_create_response['ok'] is True
assert banana_create_response['ok'] is True
assert type(apple_id) is unicode # id is a UUID, e.g., u'59da119f7911695425ab79f8a7060709'}
        assert len(apple_id) == 32
print '... Created apple, orange, and banana documents.'
# Get a document.
banana = self.fielddb.get_document(self.database_name, banana_id)
assert banana.has_key('_id')
assert banana['_id'] == banana_id
assert banana.has_key('_rev')
assert banana['_rev'][0] == u'1'
assert banana.has_key('item')
assert type(banana['prices']) is dict
print '... Retrieved the banana document.'
# Update a document.
new_banana = copy.deepcopy(self.fruits['banana'])
new_banana['foo'] = 'bar'
new_banana['item'] = 'waaaaanana'
update_response = self.fielddb.update_document(self.database_name,
banana['_id'], banana['_rev'], new_banana)
assert update_response['rev'][0] == u'2'
assert update_response['ok'] is True
assert update_response['id'] == banana_id
print '... Updated the banana document.'
# Get an updated document.
new_banana = self.fielddb.get_document(self.database_name, banana['_id'])
assert new_banana['_id'] == banana_id
assert new_banana['item'] == u'waaaaanana'
print '... Retrieved the updated banana.'
# Replicate a database.
replicate_response = self.fielddb.replicate_database(self.database_name,
self.database_clone_name)
new_database_list = self.fielddb.get_database_list()
assert len(new_database_list) == len(database_list) + 2
print '... Replicated database "%s".' % self.database_name
# Get all documents in a database
all_docs_list = self.fielddb.get_all_docs_list(self.database_name)
        assert len(all_docs_list['rows']) == 3
print '... Got the three fruit documents in the database.'
# Design Documents
########################################################################
# Create a design document.
data = {
"_id": "_design/example",
"views": {
"foo": {
"map": "function(doc){emit(doc._id, doc._rev)}"
},
"add_syntactic_category": {
"map": open('views/add_syntactic_category/map.js',
'r').read()
}
}
}
dd_create_response = self.fielddb.create_document(self.database_name,
data)
assert dd_create_response['id'] == u'_design/example'
assert dd_create_response['rev'][0] == u'1'
print '... Created a design document.'
# Get the first design document.
view = self.fielddb.get_document(self.database_name,
'_design/example/_view/foo')
assert view.has_key('rows')
print '... Got design document "foo".'
# Get the second design document.
view = self.fielddb.get_document(self.database_name,
'_design/example/_view/add_syntactic_category')
assert view.has_key('rows')
print '... Got design document "add_syntactic_category".'
# Clean Up.
self.clean_up_couch()
print 'Testing complete.'
print
def add_optparser_options(parser):
"""Adds options to the optparser parser.
"""
parser.add_option("-d", "--delete", default=None, metavar="USERNAME",
help="username of a FieldDB user to be deleted along with all of their "
"databases")
if __name__ == '__main__':
"""Use this module as a command-line utility. Basic usage is::
$ ./fielddb-client.py config.json
    where `config.json` is a JSON config file containing an object with the
following attributes, the values of which are all strings::
auth_protocol
auth_host
auth_port
corpus_protocol
corpus_host
corpus_port
couch_protocol
couch_host
couch_port
username
password
admin_username
admin_password
"""
parser = optparse.OptionParser()
add_optparser_options(parser)
(options, args) = parser.parse_args()
config_path = args[0] # required first argument
fielddb_client = FieldDBClient(config_path)
if getattr(options, 'delete', False):
        print 'Deleting user %s and all of their databases.' % options.delete
fielddb_client.login()
fielddb_client.login_couchdb()
fielddb_client.delete_user_and_corpora(options.delete)
else:
# Default behaviour is to run some tests.
tester = FieldDBClientTester(fielddb_client)
tester.test()
|
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import base64
import json
import re
from azurelinuxagent.common.utils.textutil import parse_doc, find, findall
from tests.protocol.HttpRequestPredicates import HttpRequestPredicates
from tests.tools import load_bin_data, load_data, MagicMock, Mock
from azurelinuxagent.common.protocol.imds import IMDS_ENDPOINT
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest": "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip",
"imds_info": "imds/valid.json",
"remote_access": None,
"in_vm_artifacts_profile": None,
"vm_settings": None,
"ETag": None
}
DATA_FILE_IN_VM_ARTIFACTS_PROFILE = DATA_FILE.copy()
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["ext_conf"] = "wire/ext_conf_in_vm_artifacts_profile.xml"
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["in_vm_artifacts_profile"] = "wire/in_vm_artifacts_profile.json"
DATA_FILE_IN_VM_META_DATA = DATA_FILE.copy()
DATA_FILE_IN_VM_META_DATA["ext_conf"] = "wire/ext_conf_in_vm_metadata.xml"
DATA_FILE_INVALID_VM_META_DATA = DATA_FILE.copy()
DATA_FILE_INVALID_VM_META_DATA["ext_conf"] = "wire/ext_conf_invalid_vm_metadata.xml"
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["ext_conf"] = "wire/ext_conf_no_extensions-block_blob.xml"
DATA_FILE_NOOP_GS = DATA_FILE.copy()
DATA_FILE_NOOP_GS["goal_state"] = "wire/goal_state_noop.xml"
DATA_FILE_NOOP_GS["ext_conf"] = None
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
DATA_FILE_EXT_ADDITIONAL_LOCATIONS = DATA_FILE.copy()
DATA_FILE_EXT_ADDITIONAL_LOCATIONS["ext_conf"] = "wire/ext_conf_additional_locations.xml"
DATA_FILE_EXT_DELETION = DATA_FILE.copy()
DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_EXT_SINGLE = DATA_FILE.copy()
DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy()
DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml"
DATA_FILE_CASE_MISMATCH_EXT = DATA_FILE.copy()
DATA_FILE_CASE_MISMATCH_EXT["ext_conf"] = "wire/ext_conf_settings_case_mismatch.xml"
DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy()
DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml"
DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy()
DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml"
DATA_FILE_REMOTE_ACCESS = DATA_FILE.copy()
DATA_FILE_REMOTE_ACCESS["goal_state"] = "wire/goal_state_remote_access.xml"
DATA_FILE_REMOTE_ACCESS["remote_access"] = "wire/remote_access_single_account.xml"
DATA_FILE_PLUGIN_SETTINGS_MISMATCH = DATA_FILE.copy()
DATA_FILE_PLUGIN_SETTINGS_MISMATCH["ext_conf"] = "wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml"
DATA_FILE_REQUIRED_FEATURES = DATA_FILE.copy()
DATA_FILE_REQUIRED_FEATURES["ext_conf"] = "wire/ext_conf_required_features.xml"
DATA_FILE_VM_SETTINGS = DATA_FILE.copy()
DATA_FILE_VM_SETTINGS["vm_settings"] = "hostgaplugin/vm_settings.json"
DATA_FILE_VM_SETTINGS["ETag"] ="1"
DATA_FILE_VM_SETTINGS["ext_conf"] = "hostgaplugin/ext_conf.xml"
DATA_FILE_VM_SETTINGS["in_vm_artifacts_profile"] = "hostgaplugin/in_vm_artifacts_profile.json"
class WireProtocolData(object):
def __init__(self, data_files=None):
if data_files is None:
data_files = DATA_FILE
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions": 0,
"/versions": 0,
"/health": 0,
"/HealthService": 0,
"/vmAgentLog": 0,
"goalstate": 0,
"hostingenvuri": 0,
"sharedconfiguri": 0,
"certificatesuri": 0,
"extensionsconfiguri": 0,
"remoteaccessinfouri": 0,
"extensionArtifact": 0,
"agentArtifact": 0,
"manifest.xml": 0,
"manifest_of_ga.xml": 0,
"ExampleHandlerLinux": 0,
"in_vm_artifacts_profile": 0,
"vm_settings": 0
}
self.status_blobs = []
self.data_files = data_files
self.version_info = None
self.goal_state = None
self.hosting_env = None
self.shared_config = None
self.certs = None
self.ext_conf = None
self.manifest = None
self.ga_manifest = None
self.trans_prv = None
self.trans_cert = None
self.ext = None
self.remote_access = None
self.in_vm_artifacts_profile = None
self.vm_settings = None
self.etag = None
self.imds_info = None
self.reload()
def reload(self):
self.version_info = load_data(self.data_files.get("version_info"))
self.goal_state = load_data(self.data_files.get("goal_state"))
self.hosting_env = load_data(self.data_files.get("hosting_env"))
self.shared_config = load_data(self.data_files.get("shared_config"))
self.certs = load_data(self.data_files.get("certs"))
self.ext_conf = self.data_files.get("ext_conf")
if self.ext_conf is not None:
self.ext_conf = load_data(self.ext_conf)
self.manifest = load_data(self.data_files.get("manifest"))
self.ga_manifest = load_data(self.data_files.get("ga_manifest"))
self.trans_prv = load_data(self.data_files.get("trans_prv"))
self.trans_cert = load_data(self.data_files.get("trans_cert"))
self.imds_info = json.loads(load_data(self.data_files.get("imds_info")))
self.ext = load_bin_data(self.data_files.get("test_ext"))
vm_settings = self.data_files.get("vm_settings")
if vm_settings is not None:
self.vm_settings = load_data(self.data_files.get("vm_settings"))
self.etag = self.data_files.get("ETag")
remote_access_data_file = self.data_files.get("remote_access")
if remote_access_data_file is not None:
self.remote_access = load_data(remote_access_data_file)
in_vm_artifacts_profile_file = self.data_files.get("in_vm_artifacts_profile")
if in_vm_artifacts_profile_file is not None:
self.in_vm_artifacts_profile = load_data(in_vm_artifacts_profile_file)
def mock_http_get(self, url, *_, **kwargs):
content = ''
response_headers = []
resp = MagicMock()
resp.status = httpclient.OK
if "comp=versions" in url: # wire server versions
content = self.version_info
self.call_counts["comp=versions"] += 1
elif "/versions" in url: # HostPlugin versions
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif url.endswith("/health"): # HostPlugin health
content = ''
self.call_counts["/health"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif "hostingenvuri" in url:
content = self.hosting_env
self.call_counts["hostingenvuri"] += 1
elif "sharedconfiguri" in url:
content = self.shared_config
self.call_counts["sharedconfiguri"] += 1
elif "certificatesuri" in url:
content = self.certs
self.call_counts["certificatesuri"] += 1
elif "extensionsconfiguri" in url:
content = self.ext_conf
self.call_counts["extensionsconfiguri"] += 1
elif "remoteaccessinfouri" in url:
content = self.remote_access
self.call_counts["remoteaccessinfouri"] += 1
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
elif "/vmSettings" in url:
if self.vm_settings is None:
resp.status = httpclient.NOT_FOUND
else:
content = self.vm_settings
response_headers = [('ETag', self.etag)]
self.call_counts["vm_settings"] += 1
elif '{0}/metadata/compute'.format(IMDS_ENDPOINT) in url:
content = json.dumps(self.imds_info.get("compute", "{}"))
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs:
raise ValueError("HostPlugin request is missing the HTTP headers: {0}", kwargs) # pylint: disable=raising-format-tuple
if "x-ms-artifact-location" not in kwargs["headers"]:
raise ValueError("HostPlugin request is missing the x-ms-artifact-location header: {0}", kwargs) # pylint: disable=raising-format-tuple
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif HttpRequestPredicates.is_ga_manifest_request(url):
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
resp.getheaders = Mock(return_value=response_headers)
return resp
def mock_http_post(self, url, *_, **__):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/HealthService'):
self.call_counts['/HealthService'] += 1
content = ''
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_put(self, url, data, **_):
content = ''
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/vmAgentLog'):
self.call_counts['/vmAgentLog'] += 1
elif HttpRequestPredicates.is_storage_status_request(url):
self.status_blobs.append(data)
elif HttpRequestPredicates.is_host_plugin_status_request(url):
            self.status_blobs.append(WireProtocolData.get_status_blob_from_hostgaplugin_put_status_request(data))
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
# Partially patch instance method of class CryptUtil
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
@staticmethod
def get_status_blob_from_hostgaplugin_put_status_request(data):
status_object = json.loads(data)
content = status_object["content"]
return base64.b64decode(content)
def get_no_of_plugins_in_extension_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugins_list = find(ext_config_doc, "Plugins")
return len(findall(plugins_list, "Plugin"))
def get_no_of_extensions_in_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugin_settings = find(ext_config_doc, "PluginSettings")
return len(findall(plugin_settings, "ExtensionRuntimeSettings")) + len(
findall(plugin_settings, "RuntimeSettings"))
#
# Having trouble reading the regular expressions below? you are not alone!
#
# For the use of "(?<=" "(?=" see 7.2.1 in https://docs.python.org/3.1/library/re.html
# For the use of "\g<1>" see backreferences in https://docs.python.org/3.1/library/re.html#re.sub
#
    # Note that these regular expressions are not enough to parse all valid XML documents (e.g. they do
    # not account for metacharacters like < or > in the values) but they are good enough for the test
    # data. There are some basic checks, but the functions may fail to match valid XML, or may produce
    # invalid XML, if their input is too complex.
#
@staticmethod
def replace_xml_element_value(xml_document, element_name, element_value):
new_xml_document = re.sub(r'(?<=<{0}>).+(?=</{0}>)'.format(element_name), element_value, xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match element '{0}'", element_name) # pylint: disable=raising-format-tuple
return new_xml_document
@staticmethod
def replace_xml_attribute_value(xml_document, element_name, attribute_name, attribute_value):
new_xml_document = re.sub(r'(?<=<{0} )(.*{1}=")[^"]+(?="[^>]*>)'.format(element_name, attribute_name), r'\g<1>{0}'.format(attribute_value), xml_document)
if new_xml_document == xml_document:
raise Exception("Could not match attribute '{0}' of element '{1}'".format(attribute_name, element_name))
return new_xml_document
def set_etag(self, etag):
'''
Sets the ETag for the mock response
'''
self.etag = etag
def set_incarnation(self, incarnation):
'''
Sets the incarnation in the goal state, but not on its subcomponents (e.g. hosting env, shared config)
'''
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "Incarnation", str(incarnation))
def set_container_id(self, container_id):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ContainerId", container_id)
def set_role_config_name(self, role_config_name):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ConfigName", role_config_name)
def set_hosting_env_deployment_name(self, deployment_name):
self.hosting_env = WireProtocolData.replace_xml_attribute_value(self.hosting_env, "Deployment", "name", deployment_name)
def set_shared_config_deployment_name(self, deployment_name):
self.shared_config = WireProtocolData.replace_xml_attribute_value(self.shared_config, "Deployment", "name", deployment_name)
def set_extensions_config_sequence_number(self, sequence_number):
'''
Sets the sequence number for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "RuntimeSettings", "seqNo", str(sequence_number))
def set_extensions_config_version(self, version):
'''
Sets the version for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "version", version)
def set_extensions_config_state(self, state):
'''
Sets the state for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "state", state)
def set_manifest_version(self, version):
'''
Sets the version of the extension manifest
'''
self.manifest = WireProtocolData.replace_xml_element_value(self.manifest, "Version", version)
def set_extension_config(self, ext_conf_file):
self.ext_conf = load_data(ext_conf_file)
def set_extension_config_requested_version(self, version):
self.ext_conf = WireProtocolData.replace_xml_element_value(self.ext_conf, "Version", version)
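# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original helpers):
# exercises the canned wire-server data directly. The URL is a placeholder
# chosen only to match the substrings mock_http_get() dispatches on, and the
# sketch assumes the wire/* test data files are available to load_data().
def _example_mock_goal_state():
    mock_data = WireProtocolData(DATA_FILE)
    resp = mock_data.mock_http_get("http://mock-wireserver/machine/?comp=goalstate")
    assert mock_data.call_counts["goalstate"] == 1
    return resp.read()  # contents of wire/goal_state.xml, utf-8 encoded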
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sessions2trash.py
Run this script in a web2py environment shell e.g. python web2py.py -S app
If models are loaded (-M option) auth.settings.expiration is assumed
for sessions without an expiration. If models are not loaded, sessions older
than 60 minutes are removed. Use the --expiration option to override these
values.
Typical usage:
# Delete expired sessions every 5 minutes
nohup python web2py.py -S app -M -R scripts/sessions2trash.py &
# Delete sessions older than 60 minutes regardless of expiration,
# with verbose output, then exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 3600 -f -v
# Delete all sessions regardless of expiry and exit.
python web2py.py -S app -M -R scripts/sessions2trash.py -A -o -x 0
# Delete session in a module (move to the modules folder)
from sessions2trash import single_loop
def delete_sessions():
single_loop()
"""
from __future__ import with_statement
from gluon import current
from gluon.storage import Storage
from optparse import OptionParser
import cPickle
import datetime
import os
import stat
import time
EXPIRATION_MINUTES = 60
SLEEP_MINUTES = 5
VERSION = 0.3
class SessionSet(object):
"""Class representing a set of sessions"""
def __init__(self, expiration, force, verbose):
self.expiration = expiration
self.force = force
self.verbose = verbose
def get(self):
"""Get session files/records."""
raise NotImplementedError
def trash(self):
"""Trash expired sessions."""
now = datetime.datetime.now()
for item in self.get():
status = 'OK'
last_visit = item.last_visit_default()
try:
session = item.get()
if session.auth:
if session.auth.expiration and not self.force:
self.expiration = session.auth.expiration
if session.auth.last_visit:
last_visit = session.auth.last_visit
except:
pass
age = 0
if last_visit:
age = total_seconds(now - last_visit)
if age > self.expiration or not self.expiration:
item.delete()
status = 'trashed'
if self.verbose > 1:
print 'key: %s' % str(item)
print 'expiration: %s seconds' % self.expiration
print 'last visit: %s' % str(last_visit)
print 'age: %s seconds' % age
print 'status: %s' % status
print ''
elif self.verbose > 0:
print('%s %s' % (str(item), status))
class SessionSetDb(SessionSet):
"""Class representing a set of sessions stored in database"""
def __init__(self, expiration, force, verbose):
SessionSet.__init__(self, expiration, force, verbose)
def get(self):
"""Return list of SessionDb instances for existing sessions."""
sessions = []
table = current.response.session_db_table
if table:
for row in table._db(table.id > 0).select():
sessions.append(SessionDb(row))
return sessions
class SessionSetFiles(SessionSet):
"""Class representing a set of sessions stored in flat files"""
def __init__(self, expiration, force, verbose):
SessionSet.__init__(self, expiration, force, verbose)
def get(self):
"""Return list of SessionFile instances for existing sessions."""
root_path = os.path.join(current.request.folder, 'sessions')
return [SessionFile(os.path.join(path, x)) for path,dirs,files in os.walk(root_path) for x in files]
class SessionDb(object):
"""Class representing a single session stored in database"""
def __init__(self, row):
self.row = row
def delete(self):
table = current.response.session_db_table
self.row.delete_record()
table._db.commit()
def get(self):
session = Storage()
session.update(cPickle.loads(self.row.session_data))
return session
def last_visit_default(self):
if isinstance(self.row.modified_datetime, datetime.datetime):
return self.row.modified_datetime
else:
try:
return datetime.datetime.strptime(self.row.modified_datetime, '%Y-%m-%d %H:%M:%S.%f')
except:
print 'failed to retrieve last modified time (value: %s)' % self.row.modified_datetime
def __str__(self):
return self.row.unique_key
class SessionFile(object):
"""Class representing a single session stored as a flat file"""
def __init__(self, filename):
self.filename = filename
def delete(self):
try:
os.unlink(self.filename)
except:
pass
def get(self):
session = Storage()
with open(self.filename, 'rb+') as f:
session.update(cPickle.load(f))
return session
def last_visit_default(self):
return datetime.datetime.fromtimestamp(
os.stat(self.filename)[stat.ST_MTIME])
def __str__(self):
return self.filename
def total_seconds(delta):
"""
Adapted from Python 2.7's timedelta.total_seconds() method.
Args:
delta: datetime.timedelta instance.
"""
return (delta.microseconds + (delta.seconds + (delta.days * 24 * 3600)) *
10 ** 6) / 10 ** 6
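# ---------------------------------------------------------------------------
# Hedged example (editorial addition, not part of the original script): a
# small sanity check for total_seconds(); wrapped in a function so nothing
# extra runs when web2py executes this script.
def _example_total_seconds():
    delta = datetime.timedelta(minutes=2, seconds=30)
    return total_seconds(delta)  # 150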
def single_loop(expiration=None, force=False, verbose=False):
if expiration is None:
try:
expiration = auth.settings.expiration
except:
expiration = EXPIRATION_MINUTES * 60
set_db = SessionSetDb(expiration, force, verbose)
set_files = SessionSetFiles(expiration, force, verbose)
set_db.trash()
set_files.trash()
def main():
"""Main processing."""
usage = '%prog [options]' + '\nVersion: %s' % VERSION
parser = OptionParser(usage=usage)
parser.add_option('-f', '--force',
action='store_true', dest='force', default=False,
help=('Ignore session expiration. '
'Force expiry based on -x option or auth.settings.expiration.')
)
parser.add_option('-o', '--once',
action='store_true', dest='once', default=False,
help='Delete sessions, then exit.',
)
parser.add_option('-s', '--sleep',
dest='sleep', default=SLEEP_MINUTES * 60, type="int",
help='Number of seconds to sleep between executions. Default 300.',
)
parser.add_option('-v', '--verbose',
default=0, action='count',
help="print verbose output, a second -v increases verbosity")
parser.add_option('-x', '--expiration',
dest='expiration', default=None, type="int",
help='Expiration value for sessions without expiration (in seconds)',
)
(options, unused_args) = parser.parse_args()
expiration = options.expiration
while True:
single_loop(expiration, options.force, options.verbose)
if options.once:
break
else:
if options.verbose:
print 'Sleeping %s seconds' % (options.sleep)
time.sleep(options.sleep)
if __name__ == '__main__':
main()
|
|
"""Tests for acme.messages."""
import os
import pkg_resources
import unittest
from Crypto.PublicKey import RSA
import M2Crypto
import mock
from acme import challenges
from acme import jose
CERT = jose.ComparableX509(M2Crypto.X509.load_cert_string(
pkg_resources.resource_string(
'acme.jose', os.path.join('testdata', 'cert.der')),
M2Crypto.X509.FORMAT_DER))
CSR = jose.ComparableX509(M2Crypto.X509.load_request_string(
pkg_resources.resource_string(
'acme.jose', os.path.join('testdata', 'csr.der')),
M2Crypto.X509.FORMAT_DER))
KEY = jose.util.HashableRSAKey(RSA.importKey(pkg_resources.resource_string(
'acme.jose', os.path.join('testdata', 'rsa512_key.pem'))))
CERT = jose.ComparableX509(M2Crypto.X509.load_cert(
format=M2Crypto.X509.FORMAT_DER, file=pkg_resources.resource_filename(
'acme.jose', os.path.join('testdata', 'cert.der'))))
class ErrorTest(unittest.TestCase):
"""Tests for acme.messages.Error."""
def setUp(self):
from acme.messages import Error
self.error = Error(detail='foo', typ='malformed', title='title')
self.jobj = {'detail': 'foo', 'title': 'some title'}
def test_typ_prefix(self):
self.assertEqual('malformed', self.error.typ)
self.assertEqual(
'urn:acme:error:malformed', self.error.to_partial_json()['type'])
self.assertEqual(
'malformed', self.error.from_json(self.error.to_partial_json()).typ)
def test_typ_decoder_missing_prefix(self):
from acme.messages import Error
self.jobj['type'] = 'malformed'
self.assertRaises(jose.DeserializationError, Error.from_json, self.jobj)
self.jobj['type'] = 'not valid bare type'
self.assertRaises(jose.DeserializationError, Error.from_json, self.jobj)
def test_typ_decoder_not_recognized(self):
from acme.messages import Error
self.jobj['type'] = 'urn:acme:error:baz'
self.assertRaises(jose.DeserializationError, Error.from_json, self.jobj)
def test_description(self):
self.assertEqual(
'The request message was malformed', self.error.description)
def test_from_json_hashable(self):
from acme.messages import Error
hash(Error.from_json(self.error.to_json()))
def test_str(self):
self.assertEqual(
'malformed :: The request message was malformed :: foo',
str(self.error))
self.assertEqual('foo', str(self.error.update(typ=None)))
class ConstantTest(unittest.TestCase):
"""Tests for acme.messages._Constant."""
def setUp(self):
from acme.messages import _Constant
class MockConstant(_Constant): # pylint: disable=missing-docstring
POSSIBLE_NAMES = {}
self.MockConstant = MockConstant # pylint: disable=invalid-name
self.const_a = MockConstant('a')
self.const_b = MockConstant('b')
def test_to_partial_json(self):
self.assertEqual('a', self.const_a.to_partial_json())
self.assertEqual('b', self.const_b.to_partial_json())
def test_from_json(self):
self.assertEqual(self.const_a, self.MockConstant.from_json('a'))
self.assertRaises(
jose.DeserializationError, self.MockConstant.from_json, 'c')
def test_from_json_hashable(self):
hash(self.MockConstant.from_json('a'))
def test_repr(self):
self.assertEqual('MockConstant(a)', repr(self.const_a))
self.assertEqual('MockConstant(b)', repr(self.const_b))
def test_equality(self):
const_a_prime = self.MockConstant('a')
self.assertFalse(self.const_a == self.const_b)
self.assertTrue(self.const_a == const_a_prime)
self.assertTrue(self.const_a != self.const_b)
self.assertFalse(self.const_a != const_a_prime)
class RegistrationTest(unittest.TestCase):
"""Tests for acme.messages.Registration."""
def setUp(self):
key = jose.jwk.JWKRSA(key=KEY.publickey())
contact = (
'mailto:[email protected]',
'tel:1234',
)
recovery_token = 'XYZ'
agreement = 'https://letsencrypt.org/terms'
from acme.messages import Registration
self.reg = Registration(
key=key, contact=contact, recovery_token=recovery_token,
agreement=agreement)
self.jobj_to = {
'contact': contact,
'recoveryToken': recovery_token,
'agreement': agreement,
'key': key,
}
self.jobj_from = self.jobj_to.copy()
self.jobj_from['key'] = key.to_json()
def test_from_data(self):
from acme.messages import Registration
reg = Registration.from_data(phone='1234', email='[email protected]')
self.assertEqual(reg.contact, (
'tel:1234',
'mailto:[email protected]',
))
def test_phones(self):
self.assertEqual(('1234',), self.reg.phones)
def test_emails(self):
self.assertEqual(('[email protected]',), self.reg.emails)
def test_phone(self):
self.assertEqual('1234', self.reg.phone)
def test_email(self):
self.assertEqual('[email protected]', self.reg.email)
def test_to_partial_json(self):
self.assertEqual(self.jobj_to, self.reg.to_partial_json())
def test_from_json(self):
from acme.messages import Registration
self.assertEqual(self.reg, Registration.from_json(self.jobj_from))
def test_from_json_hashable(self):
from acme.messages import Registration
hash(Registration.from_json(self.jobj_from))
class RegistrationResourceTest(unittest.TestCase):
"""Tests for acme.messages.RegistrationResource."""
def setUp(self):
from acme.messages import RegistrationResource
self.regr = RegistrationResource(
body=mock.sentinel.body, uri=mock.sentinel.uri,
new_authzr_uri=mock.sentinel.new_authzr_uri,
terms_of_service=mock.sentinel.terms_of_service)
def test_to_partial_json(self):
self.assertEqual(self.regr.to_json(), {
'body': mock.sentinel.body,
'uri': mock.sentinel.uri,
'new_authzr_uri': mock.sentinel.new_authzr_uri,
'terms_of_service': mock.sentinel.terms_of_service,
})
class ChallengeResourceTest(unittest.TestCase):
"""Tests for acme.messages.ChallengeResource."""
def test_uri(self):
from acme.messages import ChallengeResource
self.assertEqual('http://challb', ChallengeResource(body=mock.MagicMock(
uri='http://challb'), authzr_uri='http://authz').uri)
class ChallengeBodyTest(unittest.TestCase):
"""Tests for acme.messages.ChallengeBody."""
def setUp(self):
self.chall = challenges.DNS(token='foo')
from acme.messages import ChallengeBody
from acme.messages import STATUS_VALID
self.status = STATUS_VALID
self.challb = ChallengeBody(
uri='http://challb', chall=self.chall, status=self.status)
self.jobj_to = {
'uri': 'http://challb',
'status': self.status,
'type': 'dns',
'token': 'foo',
}
self.jobj_from = self.jobj_to.copy()
self.jobj_from['status'] = 'valid'
def test_to_partial_json(self):
self.assertEqual(self.jobj_to, self.challb.to_partial_json())
def test_from_json(self):
from acme.messages import ChallengeBody
self.assertEqual(self.challb, ChallengeBody.from_json(self.jobj_from))
def test_from_json_hashable(self):
from acme.messages import ChallengeBody
hash(ChallengeBody.from_json(self.jobj_from))
def test_proxy(self):
self.assertEqual('foo', self.challb.token)
class AuthorizationTest(unittest.TestCase):
"""Tests for acme.messages.Authorization."""
def setUp(self):
from acme.messages import ChallengeBody
from acme.messages import STATUS_VALID
self.challbs = (
ChallengeBody(
uri='http://challb1', status=STATUS_VALID,
chall=challenges.SimpleHTTP(token='IlirfxKKXAsHtmzK29Pj8A')),
ChallengeBody(uri='http://challb2', status=STATUS_VALID,
chall=challenges.DNS(token='DGyRejmCefe7v4NfDGDKfA')),
ChallengeBody(uri='http://challb3', status=STATUS_VALID,
chall=challenges.RecoveryToken()),
)
combinations = ((0, 2), (1, 2))
from acme.messages import Authorization
from acme.messages import Identifier
from acme.messages import IDENTIFIER_FQDN
identifier = Identifier(typ=IDENTIFIER_FQDN, value='example.com')
self.authz = Authorization(
identifier=identifier, combinations=combinations,
challenges=self.challbs)
self.jobj_from = {
'identifier': identifier.to_json(),
'challenges': [challb.to_json() for challb in self.challbs],
'combinations': combinations,
}
def test_from_json(self):
from acme.messages import Authorization
Authorization.from_json(self.jobj_from)
def test_from_json_hashable(self):
from acme.messages import Authorization
hash(Authorization.from_json(self.jobj_from))
def test_resolved_combinations(self):
self.assertEqual(self.authz.resolved_combinations, (
(self.challbs[0], self.challbs[2]),
(self.challbs[1], self.challbs[2]),
))
class AuthorizationResourceTest(unittest.TestCase):
"""Tests for acme.messages.AuthorizationResource."""
def test_json_de_serializable(self):
from acme.messages import AuthorizationResource
authzr = AuthorizationResource(
uri=mock.sentinel.uri,
body=mock.sentinel.body,
new_cert_uri=mock.sentinel.new_cert_uri,
)
self.assertTrue(isinstance(authzr, jose.JSONDeSerializable))
class CertificateRequestTest(unittest.TestCase):
"""Tests for acme.messages.CertificateRequest."""
def setUp(self):
from acme.messages import CertificateRequest
self.req = CertificateRequest(csr=CSR, authorizations=('foo',))
def test_json_de_serializable(self):
self.assertTrue(isinstance(self.req, jose.JSONDeSerializable))
from acme.messages import CertificateRequest
self.assertEqual(
self.req, CertificateRequest.from_json(self.req.to_json()))
class CertificateResourceTest(unittest.TestCase):
"""Tests for acme.messages.CertificateResourceTest."""
def setUp(self):
from acme.messages import CertificateResource
self.certr = CertificateResource(
body=CERT, uri=mock.sentinel.uri, authzrs=(),
cert_chain_uri=mock.sentinel.cert_chain_uri)
def test_json_de_serializable(self):
self.assertTrue(isinstance(self.certr, jose.JSONDeSerializable))
from acme.messages import CertificateResource
self.assertEqual(
self.certr, CertificateResource.from_json(self.certr.to_json()))
class RevocationTest(unittest.TestCase):
"""Tests for acme.messages.RevocationTest."""
def test_url(self):
from acme.messages import Revocation
url = 'https://letsencrypt-demo.org/acme/revoke-cert'
self.assertEqual(url, Revocation.url('https://letsencrypt-demo.org'))
self.assertEqual(
url, Revocation.url('https://letsencrypt-demo.org/acme/new-reg'))
def setUp(self):
from acme.messages import Revocation
self.rev = Revocation(certificate=CERT)
def test_from_json_hashable(self):
from acme.messages import Revocation
hash(Revocation.from_json(self.rev.to_json()))
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
|
from sympy.core.numbers import igcd
from primetest import isprime
from factor_ import factorint, trailing, totient
def int_tested(*j):
"""
Return all args as Python integers and confirm that the input
was equivalent to the integer, else raise a ValueError.
Examples
========
>>> from sympy.ntheory.residue_ntheory import int_tested
>>> from sympy import sqrt
>>> 3.0
3.0
>>> int_tested(_) # convert to int and test for equality
3
>>> n = sqrt(10)
>>> int_tested(n)
Traceback (most recent call last):
...
ValueError: All arguments were not integers
"""
i = tuple([int(i) for i in j])
if i != j:
raise ValueError('All arguments were not integers')
if len(i) == 1:
return i[0]
return i
def n_order(a, n):
"""Returns the order of ``a`` modulo ``n``.
The order of ``a`` modulo ``n`` is the smallest integer
``k`` such that ``a**k`` leaves a remainder of 1 with ``n``.
Examples
========
>>> from sympy.ntheory import n_order
>>> n_order(3, 7)
6
>>> n_order(4, 7)
3
"""
a, n = int_tested(a, n)
if igcd(a, n) != 1:
raise ValueError("The two numbers should be relatively prime")
group_order = totient(n)
factors = factorint(group_order)
order = 1
if a > n:
a = a % n
for p, e in factors.items():
exponent = group_order
for f in range(e + 1):
if pow(a, exponent, n) != 1:
order *= p ** (e - f + 1)
break
exponent = exponent // p
return order
def is_primitive_root(a, p):
"""
Returns True if ``a`` is a primitive root of ``p``
``a`` is said to be a primitive root of ``p`` if gcd(a, p) == 1 and
totient(p) is the smallest positive integer ``k`` such that
a**k cong 1 mod(p)
Examples
========
>>> from sympy.ntheory import is_primitive_root, n_order, totient
>>> is_primitive_root(3, 10)
True
>>> is_primitive_root(9, 10)
False
>>> n_order(3, 10) == totient(10)
True
>>> n_order(9, 10) == totient(10)
False
"""
a, p = int_tested(a, p)
if igcd(a, p) != 1:
raise ValueError("The two numbers should be relatively prime")
if a > p:
a = a % p
if n_order(a, p) == totient(p):
return True
else:
return False
def is_quad_residue(a, p):
"""
Returns True if ``a`` (mod ``p``) is in the set of squares mod ``p``,
i.e. a % p is in set([i**2 % p for i in range(p)]). If ``p`` is an odd
prime, an iterative method is used to make the determination:
>>> from sympy.ntheory import is_quad_residue
>>> list(set([i**2 % 7 for i in range(7)]))
[0, 1, 2, 4]
>>> [j for j in range(7) if is_quad_residue(j, 7)]
[0, 1, 2, 4]
See Also
========
legendre_symbol, jacobi_symbol
"""
a, p = int_tested(a, p)
if p < 1:
raise ValueError('p must be > 0')
if a >= p or a < 0:
a = a % p
if a < 2 or p < 3:
return True
if not isprime(p):
if p % 2 and jacobi_symbol(a, p) == -1:
return False
for i in range(2, p//2 + 1):
if i**2 % p == a:
return True
return False
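# For an odd prime p, Euler's criterion is applied: a is a quadratic residue
# mod p iff a**((p - 1) // 2) % p == 1. The recursive square_and_multiply
# helper below is nested inside is_quad_residue in the original source, and
# the final 'return (... % p) == 1' line is the return value of
# is_quad_residue itself, not of the helper.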
def square_and_multiply(a, n, p):
if n == 1:
return a
elif n % 2 == 1:
return ((square_and_multiply(a, n // 2, p) ** 2) * a) % p
else:
return (square_and_multiply(a, n // 2, p) ** 2) % p
return (square_and_multiply(a, (p - 1) // 2, p) % p) == 1
def legendre_symbol(a, p):
"""
Returns
=======
1. 0 if a is multiple of p
2. 1 if a is a quadratic residue of p
3. -1 otherwise
p should be an odd prime by definition
Examples
========
>>> from sympy.ntheory import legendre_symbol
>>> [legendre_symbol(i, 7) for i in range(7)]
[0, 1, 1, -1, 1, -1, -1]
>>> list(set([i**2 % 7 for i in range(7)]))
[0, 1, 2, 4]
See Also
========
is_quad_residue, jacobi_symbol
"""
a, p = int_tested(a, p)
if not isprime(p) or p == 2:
raise ValueError("p should be an odd prime")
_, a = divmod(a, p)
if not a:
return 0
if is_quad_residue(a, p):
return 1
else:
return -1
def jacobi_symbol(m, n):
"""
Returns the product of the legendre_symbol(m, p)
for all the prime factors, p, of n.
Returns
=======
1. 0 if m cong 0 mod(n)
2. 1 if x**2 cong m mod(n) has a solution
3. -1 otherwise
Examples
========
>>> from sympy.ntheory import jacobi_symbol, legendre_symbol
>>> from sympy import Mul, S
>>> jacobi_symbol(45, 77)
-1
>>> jacobi_symbol(60, 121)
1
The relationship between the jacobi_symbol and legendre_symbol can
be demonstrated as follows:
>>> L = legendre_symbol
>>> S(45).factors()
{3: 2, 5: 1}
>>> jacobi_symbol(7, 45) == L(7, 3)**2 * L(7, 5)**1
True
See Also
========
is_quad_residue, legendre_symbol
"""
m, n = int_tested(m, n)
if not n % 2:
raise ValueError("n should be an odd integer")
if m < 0 or m > n:
m = m % n
if not m:
return int(n == 1)
if n == 1 or m == 1:
return 1
if igcd(m, n) != 1:
return 0
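# The loop below is the standard binary Jacobi-symbol algorithm: factors of 2
# are pulled out of m (flipping the sign when n % 8 is 3 or 5, by the second
# supplement to quadratic reciprocity), and m, n are swapped with a sign flip
# when both are congruent to 3 mod 4 (quadratic reciprocity).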
j = 1
s = trailing(m)
m = m >> s
if s % 2 and n % 8 in [3, 5]:
j *= -1
while m != 1:
if m % 4 == 3 and n % 4 == 3:
j *= -1
m, n = n % m, m
s = trailing(m)
m = m >> s
if s % 2 and n % 8 in [3, 5]:
j *= -1
return j
|
|
import itertools
import uuid
import pickle
from collections import namedtuple
from typing import Tuple
from math import ceil
import numpy as np
from hfetch import HNumpyStore, HArrayMetadata
from . import config, log
from .IStorage import IStorage
from .tools import extract_ks_tab, get_istorage_attrs, storage_id_from_name, build_remotely
class StorageNumpy(IStorage, np.ndarray):
BLOCK_MODE = 1
COLUMN_MODE = 2
_build_args = None
_prepared_store_meta = config.session.prepare('INSERT INTO hecuba.istorage'
'(storage_id, class_name, name, numpy_meta, block_id, base_numpy, view_serialization, tokens)'
'VALUES (?,?,?,?,?,?,?,?)')
args_names = ["storage_id", "class_name", "name", "metas", "block_id", "base_numpy", "view_serialization", "tokens"]
args = namedtuple('StorageNumpyArgs', args_names)
def getID(self):
"""
Method to retrieve the storage id as a string. Used solely by PyCOMPSs.
If the StorageNumpy is a persistent slice, the entry in the IStorage is created
at this point to avoid serialization and enhance locality.
:return: Storage_id as str
"""
sid = super().getID()
if sid != 'None':
if self._persistance_needed:
# build_args should contain the right metas: they are calculated at getitem.
# We must avoid storing the meta again for a SN that has already been persisted through make_persistent.
# We mark it with _persistance_needed=True at getitem and create the istorage entry at this point.
StorageNumpy._store_meta(self._build_args)
self._persistance_needed = False
self.sync() # Data may be needed in another node, flush data
return sid
def _calculate_nblocks(self, view):
''' Calculate (and set) the number of used blocks in data storage by 'view'
(aka the number of blocks reserved in memory)
This is used to determine whether a numpy is fully loaded in memory, to avoid accesses to Cassandra.
Args:
self : object to use
view : view used to calculate the blocks
'''
l = self.calculate_list_of_ranges_of_block_coords(view)
num_blocks = 1
for i in l:
num_blocks = num_blocks * len(i)
self._n_blocks = num_blocks
log.debug("JCOSTA _calculate_nblocks sid={} _n_blocks={}".format(self.storage_id, self._n_blocks))
# used by dislib? To be deleted after checking this
def np_split(self, block_size: Tuple[int, int]):
# For now, only split in two dimensions is supported
bn, bm = block_size
for block_id, i in enumerate(range(0, self.shape[0], bn)):
block = [self[i: i + bn, j:j + bm] for j in range(0, self.shape[1], bm)]
obj = StorageNumpy(input_array=block, name=self._get_name(), storage_id=uuid.uuid4(), block_id=block_id)
yield obj
@staticmethod
def _composite_key(storage_id, cluster_id):
"""
Calculate the cassandra hash (storage_id, cluster_id) courtesy of:
https://stackoverflow.com/questions/22915237/how-to-generate-cassandra-token-for-composite-partition-key
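Each key component is encoded the way Cassandra serializes composite
partition keys: a 2-byte big-endian length, the raw component bytes and a
trailing 0x00 end-of-component byte; the concatenation is then hashed with
murmur3.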
"""
from cassandra import murmur3
tam_sid = len(storage_id.bytes)
bytes_storage_id = bytes([0, tam_sid]) + storage_id.bytes + bytes([0])
bytes_cluster_id = bytes([0, 4]) + cluster_id.to_bytes(4,'big') + bytes([0])
mykey = bytes_storage_id + bytes_cluster_id
return murmur3.murmur3(mykey)
def split(self,cols=None):
"""
Divide numpy into persistent views to exploit parallelism.
cols: Decides how to divide the numpy:
If None, use the inner blocks stored in cassandra.
If True, divide by columns of blocks (this allows exploiting arrow when enabled)
If False, divide by rows of blocks.
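Illustrative usage sketch (assumes the persistent numpy already exists;
'process' is a hypothetical task, not part of this module):
sn = StorageNumpy(name="my_ksp.my_table")
for block in sn.split(cols=True):
process(block)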
"""
# TODO this should work for VOLATILE objects too! Now only works for PERSISTENT
if self._build_args.metas.partition_type == 2:
raise NotImplementedError("Split on columnar data is not supported")
tokens = self._get_tokens(cols)
log.debug("split: shape %s cols %s", self.shape, cols)
if cols is True:
return self._split_by_cols(tokens)
if cols is False:
return self._split_by_rows(tokens)
return self._split_by_blocks(tokens)
def _get_tokens(self, cols):
exploit_locality = False
if StorageNumpy._arrow_enabled(self): #Fortran order
exploit_locality = (cols != False) # By blocks or by columns
else: #Zorder
exploit_locality = (cols == None) # By blocks only
tokens = None
if exploit_locality:
# Calculate the blocks of the numpy
blocks = self._hcache.get_block_ids(self._build_args.metas) # returns a list of tuples (cluster_id, block_id)
#Build map cluster_id -> token
cluster_id_to_token = {}
for (zorder_id, cluster_id, block_id, ccs) in blocks:
if not cluster_id_to_token.__contains__(cluster_id):
hash_key = StorageNumpy._composite_key(self.storage_id, cluster_id)
cluster_id_to_token[cluster_id] = hash_key
#Calculate the tokens for each block
tokens = {}
for (zorder_id, cluster_id, block_id, ccs) in blocks:
log.debug(" split : Create block {} {} ".format(cluster_id, block_id ))
if cluster_id not in tokens:
token_split = []
hash_key = cluster_id_to_token[cluster_id]
for t in self._tokens:
if hash_key >= t[0] and hash_key < t[1]:
token_split.append(t)
break # Finish the for loop
tokens[cluster_id] = token_split
return tokens
def _split_by_blocks(self, tokens):
blocks = self._hcache.get_block_ids(self._build_args.metas) # returns a list of tuples (cluster_id, block_id)
_parent_numpy_full_loaded=self._numpy_full_loaded
for (zorder_id, cluster_id, block_id, ccs) in blocks:
# 'values' contains block_coords that must be transformed to original_coordinates
pyccs = [ i * self._row_elem for i in ccs]
slc = [ slice(i, i + self._row_elem) for i in pyccs ]
slc = tuple(slc)
token_split = tokens[cluster_id]
self._last_sliced_coord = slc # HACK to call '_create_lazy_persistent_view' in 'array_finalize' when calling the next '__getitem__'
resultado = super(StorageNumpy, self).__getitem__(slc) # Generate view in memory
resultado._numpy_full_loaded = _parent_numpy_full_loaded # Due to the HACK, we need to keep the _numpy_full_loaded status
resultado._build_args = resultado._build_args._replace(tokens=token_split)
yield resultado
# ################
## ccs: Index of blocks
## +-----------------+
## | 0,0 | 0,1 | 0,2 |
## +-----------------+
## | 1,0 | 1,1 | 1,2 |
## +-----------------+
## ...
## +-----------------+
#
# pyccs: initial coordinates of each block
## +======----------------+
## I 0,0 I 0,22 | 0,44 |
## +----------------------+
## I 22,0 I 22,22 | 22,44 |
## +----------------------+
## ...
## +----------------------+
## I 22,0 I 22,22 | 22,44 |
## +======----------------+
def _split_by_cols(self, mytokens):
"""
Generator to divide the numpy into blocks of columns (taking into account how the data is stored on disk)
"""
log.debug(" split_by_cols shape:%s row_elem:%s ", self.shape, self._row_elem)
list_of_clusters= range(0, self.shape[1], self._row_elem)
_parent_numpy_full_loaded=self._numpy_full_loaded
for cluster_id in list_of_clusters:
log.debug(" split_by_cols cluster_id: %s", cluster_id)
slc = ( slice(None,None,None), slice(cluster_id, cluster_id + self._row_elem ) )
self._last_sliced_coord = slc # HACK to call '_create_lazy_persistent_view' in 'array_finalize' when calling the next '__getitem__' (we want to AVOID calling 'getitem' directly because it LOADS data)
resultado = super(StorageNumpy, self).__getitem__(slc) # Generate view in memory
resultado._numpy_full_loaded = _parent_numpy_full_loaded # Due to the HACK, we need to keep the _numpy_full_loaded status
if mytokens is not None:
resultado._build_args = resultado._build_args._replace(tokens=mytokens[cluster_id//self._row_elem])
yield resultado
def _split_by_rows(self, tokens):
"""
Generator to divide the numpy into blocks of rows (taking into account how the data is stored on disk)
"""
log.debug(" split_by_cols shape:%s row_elem:%s ", self.shape, self._row_elem)
list_of_clusters= range(0, self.shape[0], self._row_elem)
_parent_numpy_full_loaded=self._numpy_full_loaded
for cluster_id in list_of_clusters:
log.debug(" split_by_cols cluster_id: %s", cluster_id)
slc = ( slice(cluster_id, cluster_id + self._row_elem ), slice(None,None,None) )
self._last_sliced_coord = slc # HACK to call '_create_lazy_persistent_view' in 'array_finalize' when calling the next '__getitem__' (we want to AVOID calling 'getitem' directly because it LOADS data)
resultado = super(StorageNumpy, self).__getitem__(slc) # Generate view in memory
resultado._numpy_full_loaded = _parent_numpy_full_loaded # Due to the HACK, we need to keep the _numpy_full_loaded status
# TOKENS are ignored in this case
yield resultado
@staticmethod
def get_arrow_name(name):
# get_arrow_name: Returns the fully qualified name (keyspace.table) of the arrow table (used for reads) derived from 'name'
(ksp,table) = extract_ks_tab(name)
return ksp + "_arrow." + table[:42] +"_arrow"
@staticmethod
def _isarrow(name):
'''
Returns true if the name is an arrow table
'''
return name.endswith("_arrow")
@staticmethod
def get_buffer_name(ksp, name):
"""
Returns a full qualified name for a table name in the arrow keyspace
Args:
ksp : keyspace_arrow
name: table_arrow
Returns: keyspace_arrow.table_buffer
"""
return ksp + "." + name[:-6] +"_buffer"
# _initialize_existing_object : Instantiates a new StorageNumpy
# from metadata existent in Hecuba given its name or storage_id.
# Parameters:
# cls : Class to use for instantiation
# name : A *qualified* cassandra name (keyspace.table_name) to instantiate
# storage_id: The UUID to instantiate
# If both, name and storage_id, are given, name is ignored.
# It reserves memory to store the numpy (zeros).
@staticmethod
def _initialize_existing_object(cls, name, storage_id):
# StorageNumpy(None, name="xxx", none)
# or StorageNumpy(None, none, storage_id="xxx")
# or StorageNumpy(None, name="xxx", storage_id="yyy") Does it exist?
log.debug("INITIALIZE EXISTING OBJECT name=%s sid=%s", name, storage_id)
if name and storage_id:
log.warning("INITIALIZE EXISTING OBJECT request passing both name {} and storage_id {}. Ignoring parameter 'name'".format(name, storage_id))
if not storage_id: # StorageNumpy(None, name="xxxx", NONE)
storage_id = storage_id_from_name(name)
# Load metadata
istorage_metas = get_istorage_attrs(storage_id)
if len(istorage_metas) == 0:
msg = "Persistent StorageNumpy Storage_id={}".format(storage_id)
if name:
msg = msg + " name={}".format(name)
raise ValueError("{} does not exist".format(msg))
name = istorage_metas[0].name
my_metas = istorage_metas[0].numpy_meta
metas_to_reserve = my_metas
base_numpy = istorage_metas[0].base_numpy
if storage_id != base_numpy:
# it is a view: load the base instead of storage_id
# base_numpy can be None?
log.debug("Shared view of {}".format(base_numpy))
metas_to_reserve = get_istorage_attrs(base_numpy)[0].numpy_meta
tokens = istorage_metas[0].tokens
# Reserve array: even if we are a view we reserve space for the WHOLE numpy, as the memory is shared with the base array
result = cls.reserve_numpy_array(storage_id, name, metas_to_reserve) # storage_id is NOT used at all
input_array = result[0]
# Transform memory to a StorageNumpy
# This must be done BEFORE reconstructing the view, to keep the BASE
# numpy loaded (otherwise the 'base' field is overwritten with the base
# of the view)
if StorageNumpy._arrow_enabled(input_array):
obj = np.asfortranarray(input_array).view(cls)
obj._hcache_arrow = result[2]
else: # Reserve for normal numpy
obj = np.asarray(input_array).view(cls)
obj._hcache = result[1]
obj._base_metas = metas_to_reserve #Cache value to avoid cassandra accesses
# The data recovered from the istorage is a persistent view, therefore reconstruct the view
myview = pickle.loads(istorage_metas[0].view_serialization)
log.debug(" view of {}".format(myview))
if isinstance(myview, tuple):
obj = super(StorageNumpy, obj).__getitem__(myview)
else:
raise TypeError(" WARNING: recovered 'view_serialization' has unexpected type ", type(myview))
IStorage.__init__(obj, name=name, storage_id=storage_id, tokens=tokens)
obj._numpy_full_loaded = False
obj._hcache = result[1]
obj._build_args = obj.args(obj.storage_id, istorage_metas[0].class_name,
istorage_metas[0].name, my_metas, istorage_metas[0].block_id, base_numpy,
myview,
istorage_metas[0].tokens)
obj._row_elem = obj._hcache.get_elements_per_row(storage_id, metas_to_reserve)
obj._calculate_nblocks(myview)
return obj
@staticmethod
def _arrow_enabled(input_array):
return (config.arrow_enabled and getattr(input_array, 'ndim', 0) == 2)
def __new__(cls, input_array=None, name=None, storage_id=None, block_id=None, **kwargs):
log.debug("input_array=%s name=%s storage_id=%s ENTER ",input_array is not None, name, storage_id)
if name is not None:
# Construct full qualified name to deal with cases where the name does NOT contain keyspace
(ksp, table) = extract_ks_tab(name)
name = ksp + "." + table
if (len(table)>40 or table.startswith("HECUBA")):
# Cassandra limits name to 48 characters: we reserve 8 characters
# for special Hecuba tables
raise AttributeError("The name of an user StorageNumpy is limited to 40 chars and can not start 'HECUBA' {}".format(table))
if input_array is None and (name is not None or storage_id is not None):
obj = StorageNumpy._initialize_existing_object(cls, name, storage_id)
name = obj._get_name()
else:
if isinstance(input_array, StorageNumpy): # StorageNumpyDesign
log.debug(" NEW from %s", storage_id)
# StorageNumpy(Snumpy, None, None)
# StorageNumpy(Snumpy, name, None)
# StorageNumpy(Snumpy, None, UUID)
# StorageNumpy(Snumpy, name, UUID)
if storage_id is not None:
log.warn("Creating a StorageNumpy with a specific StorageID")
obj = input_array.copy()
else:
# StorageNumpy(numpy, None, None)
if not StorageNumpy._arrow_enabled(input_array):
obj = np.asarray(input_array).copy().view(cls)
else:
obj = np.asfortranarray(input_array.copy()).view(cls) #to set the fortran contiguous flag it is necessary to do the copy before
log.debug("Created ARROW")
IStorage.__init__(obj, name=name, storage_id=storage_id, kwargs=kwargs)
if name or storage_id: # The object needs to be persisted
load_data = (input_array is None) and not config.load_on_demand
if input_array is not None:
if isinstance(input_array,StorageNumpy):
log.warn("Creating a Persistent StorageNumpy.")
obj._persist_data(obj._get_name())
if load_data: #FIXME this should affect the existing object (here it already exists in memory... or it should)
obj[:] # HACK! Load ALL elements in memory NOW (recursively calls getitem)
#print("JJ name = ", name, flush=True)
#print("JJ _name = ", obj._name, flush=True)
log.debug("CREATED NEW StorageNumpy storage_id=%s with input_array=%s name=%s ", storage_id, input_array is not None, name)
return obj
def __init__(self, input_array=None, name=None, storage_id=None, **kwargs):
pass # DO NOT REMOVE THIS FUNCTION!!! Yolanda's eyes bleed!
@staticmethod
def removenones(n, maxstop=None):
"""
Remove the Nones from a slice:
start -> 0
stop -> None or maxstop
step -> 1
This is a helper utility for the slice-composition operations performed later.
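Illustrative example:
>>> StorageNumpy.removenones(slice(None, None, None), 10)
slice(0, 10, 1)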
"""
if not n.start:
oldstart = 0
else:
oldstart = n.start
if not n.step:
oldstep = 1
else:
oldstep=n.step
if not n.stop:
newstop = maxstop
else:
newstop = n.stop
return slice(oldstart, newstop, oldstep)
def calculate_list_of_ranges_of_block_coords(self, view):
"""
Return a list with the ranges of block coordinates for each dimension of 'view'.
The block coordinates are relative to 'self.base.shape' (big).
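Illustrative example (assuming a base shape of (100, 100) and
self._row_elem == 7): the view (slice(0, 15, 1), 3) touches block rows
0..2 and block column 0, so the result is [range(0, 3), range(0, 1)].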
"""
first=[]
last=[]
shape = self._get_base_array().shape
SIZE = self._row_elem # elements per block row (ROW_ELEM, e.g. 7)
for idx, i in enumerate(view):
#print(" {}: element ={} ".format(idx, i))
if isinstance(i, int):
self._check_value_in_shape(i, shape[idx], idx)
first.append(i//SIZE)
last.append(i//SIZE)
else: # It's a slice
n = StorageNumpy.removenones(i, shape[idx])
#print(" {}: n ={} ".format(idx, n))
self._check_value_in_shape(n.start, shape[idx], idx)
self._check_value_in_shape(n.stop-1, shape[idx], idx)
first.append(n.start//SIZE)
last.append((n.stop-1)//SIZE)
#print(" calculate_block_coords: first ={} last = {}".format(first,last), flush=True)
l=[]
for i in range(len(view)):
l.append( range(first[i], last[i]+1))
#print(" calculate_block_coords: l = {}".format(l), flush=True)
return l
def calculate_block_coords(self, view):
"""
Return a list with all the block coordinates relative to 'self.base.shape' corresponding to the elements in 'view'
"""
l = self.calculate_list_of_ranges_of_block_coords(view)
return [b for b in itertools.product(*l)]
@staticmethod
def _compose_index(s, pos):
"""
Returns the corresponding index in the slice 's' for argument 'pos'
(2,10,2), 1 --> 4
0 1 2 3 <------------------+
2 4 8 10 <=== slice indexes \
-4 -3 -2 -1 <--------------------+- pos
It works for negative values (...until a new case is found).
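Doctest-style examples:
>>> StorageNumpy._compose_index(slice(2, 10, 2), 1)
4
>>> StorageNumpy._compose_index(slice(2, 10, 2), -1)
8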
"""
if pos < 0:
res = s.stop + pos*s.step
else:
res = s.start + pos*s.step
return res
@staticmethod
def view_composer_internal(shape, old, new):
"""
shape: tuple with the dimensions of the numpy with the 'old' view where the 'new' will be applied
old : cumulated view
new : MUST be a tuple with the new view to compose on top of old
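Illustrative example (assuming a 10x10 base array): composing the new view
(1, slice(0, 3, 1)) on top of the old view (slice(2, 8, 1), slice(0, 10, 1))
returns (3, slice(0, 3, 1)): row index 1 of the old view maps to base row 3,
and columns 0:3 stay within the old full-column slice.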
"""
if len(old) != len(shape):
raise TypeError("old needs to be a tuple with the same dimensions ({}) as the numpy ({})".format(len(old), len(shape)))
if isinstance(new, tuple):
# Recursive case
old=list(old)
j=0 # current old shape
i=0 # current new element
while i<len(new) and j < len(old):
n = new[i]
while j < len(old) and isinstance(old[j], int): # skip integers
j=j+1
if j < len(old):
old[j] = StorageNumpy.view_composer_internal((shape[j],),(old[j],), n)
j=j+1
i=i+1
#print(" view_composer: ======> {}".format(old))
return tuple(old)
# Base cases
#print(" view_composer: shape={} old={} new={}".format(shape, old, new))
old0 = old[0]
res = None
if isinstance(new, int):
if isinstance(old0, int):
res = old0 # 'new' is IGNORED
elif isinstance(old0, slice):
old0 = StorageNumpy.removenones(old0, shape[0])
res = StorageNumpy._compose_index(old0, new)
else:
raise NotImplementedError("Compose an int and a {}".format(type(old0)))
elif isinstance(new, slice):
if isinstance(old0, int):
res = old0 # 'new' is IGNORED
elif isinstance(old0, slice):
old0 = StorageNumpy.removenones(old0, shape[0])
new = StorageNumpy.removenones(new, shape[0])
newstart = StorageNumpy._compose_index(old0, new.start)
newstop = StorageNumpy._compose_index(old0, new.stop)
if old0.step >= 0 and new.step >= 0:
resstep = old0.step * new.step
else:
raise NotImplementedError("slice with negative steps") # TODO
res = slice(newstart, min(newstop,old0.stop), resstep)
else:
raise NotImplementedError("Compose an slice and a {}".format(type(old0)))
else:
raise NotImplementedError("Compose an {} with previous slice".format(type(new)))
if len(old) > 1:
toreturn=list(old)
toreturn[0]=res
res = tuple(toreturn)
#print(" view_composer: ====> {}".format(res))
return res
def _view_composer_new(self, new_view):
"""
Compose a view on top of self.base equivalent to 'new_view' on current object
"""
log.debug(" view_composer: shape={} old={} new={}".format(self._get_base_array().shape, self._build_args.view_serialization, new_view))
if isinstance(new_view, int) or isinstance(new_view,slice):
new_view=(new_view,)
elif not isinstance (new_view,tuple):
raise TypeError("View must be a tuple,int or slice instead of {}".format(type(new_view)))
old = self._build_args.view_serialization
res = StorageNumpy.view_composer_internal(self._get_base_array().shape, old, new_view)
log.debug(" view_composer: ======> {}".format(res))
return res
def _create_lazy_persistent_view(self, view):
"""
Create a persistent view of current object.
The resulting view, even though it has a storage_id, is NOT persistent yet.
It will be made persistent when 'getID()' is invoked on it (this
will usually happen automatically when using COMPSS)
"""
new_view_serialization = self._view_composer_new(view)
storage_id = uuid.uuid4()
self.storage_id = storage_id
metas = HArrayMetadata(
list(self.shape),
list(self.strides),
self.dtype.kind,
self.dtype.byteorder,
self.itemsize,
self.flags.num,
self._build_args.metas.partition_type)
new_args = self._build_args._replace(metas=metas, storage_id=storage_id,
view_serialization=new_view_serialization)
self._build_args = new_args
self._calculate_nblocks(new_view_serialization)
self._persistance_needed = True
# used as copy constructor
def __array_finalize__(self, obj):
if obj is None:
log.debug(" __array_finalize__ NEW")
return
log.debug("__array_finalize__ self.base=None?%s obj.base=None?%s", getattr(self, 'base', None) is None, getattr(obj, 'base', None) is None)
if self.base is not None: # It is a view, therefore, copy data from object
log.debug(" __array_finalize__ view (new_from_template/view)")
self.storage_id = getattr(obj, 'storage_id', None)
self._name = getattr(obj, '_name', None)
self._base_metas = getattr(obj, '_base_metas', None)
self._hcache = getattr(obj, '_hcache', None)
if StorageNumpy._arrow_enabled(self._get_base_array()):
self._hcache_arrow = getattr(obj, '_hcache_arrow', None)
self._row_elem = getattr(obj, '_row_elem', None)
# if we are a view we have ALREADY loaded all the subarray
self._loaded_coordinates = getattr(obj, '_loaded_coordinates', [])
self._loaded_columns = getattr(obj, '_loaded_columns', set())
self._is_persistent = getattr(obj, '_is_persistent', False)
self._block_id = getattr(obj, '_block_id', None)
self._class_name = getattr(obj,'_class_name', 'hecuba.hnumpy.StorageNumpy')
self._tokens = getattr(obj,'_tokens',None)
self._build_args = getattr(obj, '_build_args', None)
self._persistance_needed = getattr(obj, '_persistance_needed', False)
self._persistent_columnar = getattr(obj, '_persistent_columnar', False)
self._numpy_full_loaded = getattr(obj, '_numpy_full_loaded', False)
if type(obj) == StorageNumpy: # Instantiate or getitem
log.debug(" array_finalize obj == StorageNumpy")
if getattr(obj, '_last_sliced_coord', None): #getitem or split
if obj.shape == self.shape:
self._n_blocks = getattr(obj, '_n_blocks', None)
else:
log.debug(" array_finalize obj.shape != self.shape create persistent view")
self._create_lazy_persistent_view(obj._last_sliced_coord)
if self.is_columnar(self._build_args.view_serialization):
self._persistent_columnar = True
obj._last_sliced_coord = None
self._numpy_full_loaded = True # By default assume we come from a getitem, otherwise mark it as appropriate (split)
else:
# StorageNumpy from a numpy
log.debug(" array_finalize obj != StorageNumpy")
self._numpy_full_loaded = True # Default value
else:
log.debug(" __array_finalize__ copy")
# Initialize fields as the __new__ case with input_array and not name
self._loaded_coordinates = []
self._loaded_columns = set()
self._numpy_full_loaded = True # FIXME we only support copy for already loaded objects
self._name = None
self.storage_id = None
self._is_persistent = False
self._class_name = getattr(obj,'_class_name', 'hecuba.hnumpy.StorageNumpy')
self._block_id = getattr(obj, '_block_id', None)
self._persistance_needed = False
self._persistent_columnar= False
def _get_base_array(self):
''' Returns the 'base' numpy from this SN. '''
base = getattr(self, 'base',None)
if base is None:
base = self
return base
@staticmethod
def _create_tables(name):
(ksp, table) = extract_ks_tab(name)
log.debug("Create table %s %s", ksp, table)
query_keyspace = "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = %s" % (ksp, config.replication)
config.executelocked(query_keyspace)
query_table='CREATE TABLE IF NOT EXISTS ' + ksp + '.' + table + '(storage_id uuid , '\
'cluster_id int, ' \
'block_id int, ' \
'payload blob, ' \
'PRIMARY KEY((storage_id,cluster_id),block_id))'
config.executelocked(query_table)
@staticmethod
def _create_tables_arrow(name):
(ksp, table) = extract_ks_tab(name)
if config.arrow_enabled:
# Add 'arrow' tables
# harrow_ to read
# buffer_ to write
query_keyspace = "CREATE KEYSPACE IF NOT EXISTS %s WITH replication = %s" % (ksp, config.replication)
config.executelocked(query_keyspace)
tbl_buffer = StorageNumpy.get_buffer_name(ksp, table)
query_table_buff ='CREATE TABLE IF NOT EXISTS ' + tbl_buffer + \
'(storage_id uuid , ' \
'cluster_id int, ' \
'col_id bigint, ' \
'row_id bigint, ' \
'size_elem int, ' \
'payload blob, ' \
'PRIMARY KEY((storage_id, cluster_id), col_id))'
config.executelocked(query_table_buff)
query_table_arrow='CREATE TABLE IF NOT EXISTS ' + name + \
'(storage_id uuid, ' \
'cluster_id int, ' \
'col_id bigint, ' \
'arrow_addr bigint, ' \
'arrow_size int, ' \
'PRIMARY KEY((storage_id, cluster_id), col_id))'
log.debug("Create table %s and %s", name, tbl_buffer)
config.executelocked(query_table_arrow)
@staticmethod
def _create_hcache(name):
(ksp, table) = extract_ks_tab(name)
log.debug("Create cache for %s %s", ksp, table)
hcache_params = (ksp, table,
{'cache_size': config.max_cache_size,
'writer_par': config.write_callbacks_number,
'write_buffer': config.write_buffer_size,
'timestamped_writes': False})
return HNumpyStore(*hcache_params)
@staticmethod
def _store_meta(storage_args):
"""
Saves the information of the object in the istorage table.
Args:.
storage_args (object): contains all data needed to restore the object from the workers
"""
log.debug("StorageObj: storing media %s", storage_args)
try:
config.session.execute(StorageNumpy._prepared_store_meta,
[storage_args.storage_id, storage_args.class_name,
storage_args.name,
storage_args.metas,
storage_args.block_id,
storage_args.base_numpy,
pickle.dumps(storage_args.view_serialization),
storage_args.tokens])
except Exception as ex:
log.warn("Error creating the StorageNumpy metadata with args: %s" % str(storage_args))
raise ex
@staticmethod
def reserve_numpy_array(storage_id, name, metas):
'''Provides a numpy array with the number of elements obtained through storage_id'''
log.debug(" Reserve memory for {} {} {}".format(name, storage_id, metas))
hcache = StorageNumpy._create_hcache(name)
result = hcache.allocate_numpy(storage_id, metas)
if len(result) == 1:
if StorageNumpy._arrow_enabled(result[0]):
hcache_arrow = StorageNumpy._create_hcache(StorageNumpy.get_arrow_name(name))
return [result[0], hcache, hcache_arrow]
return [result[0], hcache]
else:
raise KeyError
def _select_blocks(self,sliced_coord):
"""
Calculate the list of block coordinates to load given a specific numpy slice syntax.
Args:
self: StorageNumpy to apply list of coordinates.
sliced_coord: Slice syntax to evaluate.
May raise an IndexError exception
"""
new_coords = self.calculate_block_coords(sliced_coord)
log.debug("selecting blocks :{} -> {}".format(sliced_coord,new_coords))
return new_coords
def _load_blocks(self, new_coords):
"""
Load the provided block coordinates from cassandra into memory
Args:
self: The StorageNumpy to load data into
new_coords: The coordinates to load (using ZOrder identification)
"""
load = True # By default, load everything
if not new_coords: # Special case: Load everything
log.debug("LOADING ALL BLOCKS OF NUMPY")
self._numpy_full_loaded = True
new_coords = None
self._loaded_coordinates = None
else:
log.debug("LOADING COORDINATES")
# coordinates is the union between the loaded coordinates and the new ones
coordinates = list(set(itertools.chain.from_iterable((self._loaded_coordinates, new_coords))))
if (len(coordinates) != len(self._loaded_coordinates)):
self._numpy_full_loaded = (len(coordinates) == self._n_blocks)
self._loaded_coordinates = coordinates
else:
load = False
if load:
base_numpy = self._get_base_array()
metas = self._base_metas
log.debug(" COORDINATES ARE {} ".format(new_coords))
self._hcache.load_numpy_slices([self._build_args.base_numpy], metas, [base_numpy],
new_coords,
StorageNumpy.BLOCK_MODE)
def is_columnar(self,sliced_coord):
if not StorageNumpy._arrow_enabled(self._get_base_array()):
log.debug("HECUBA_ARROW is not enabled or dimensions > 2. Columnar acces disabled.")
return False
if self._persistent_columnar:
return True
# if the number of rows is very low we do not use columnar access
if self.shape[0]<50:
#print("self.shape[0]<50", flush=True)
return False
if isinstance(sliced_coord, slice) and (sliced_coord == slice(None, None, None) or sliced_coord == slice(0, self._get_base_array().shape[0],1)):
return True
if isinstance(sliced_coord, tuple):
# If the getitem parameter is a tuple, then we may catch the
# column accesses: Ex: s[:, i], s[:, [i1,i2]], s[:, slice(...)]
# All these accesses arrive here as a tuple:
# (slice(None,None,None), xxx)
# or a slice that has ALL elements
# where xxx is the last parameter of the tuple.
# FIXME Extend to more than 2 dimensions
dims = sliced_coord.__len__()
if dims == 2: # Only 2 dimensions
if isinstance(sliced_coord[-dims], slice) and (sliced_coord[-dims] == slice(None, None, None) or sliced_coord[-dims]==slice(0,self._get_base_array().shape[-dims],1)):
return True
return False
return False
def _select_columns(self, sliced_coord):
"""
Returns None or a list of columns accessed by 'sliced_coord'
The list of columns is calculated on top of 'self.base'
"""
columns = None
last = sliced_coord[-1]
if isinstance (last,int):
columns = [last]
else: # it is a slice
last = StorageNumpy.removenones(last, self._get_base_array().shape[1])
columns = [ c for c in range(last.start, last.stop, last.step)]
log.debug(" _select_columns ({}) ==> {}".format(sliced_coord, columns))
return columns
def _check_value_in_shape(self, value, shape, axis):
if (value < 0) or (value > shape):
raise IndexError("index {} is out of bounds for axis {} with size {}".format(value, axis, shape))
def _check_columns_in_bounds(self, columns):
"""
Check that the list of columns belongs to shape in base, or raise an exception
"""
for col in columns:
self._check_value_in_shape(col, self._get_base_array().shape[1], 1)
def _load_columns(self, columns):
"""
Load from Cassandra the list of columns.
Args:
self: The StorageNumpy to load data into
columns: The coordinates to load (column position)
PRE: self._is_persistent and not self._numpy_full_loaded
"""
self._check_columns_in_bounds(columns)
load = True
coordinates = self._loaded_columns.union(columns)
if (len(coordinates) != len(self._loaded_columns)):
self._numpy_full_loaded = (len(coordinates) == self.shape[1])
self._loaded_columns = coordinates
if not self._persistent_columnar:
log.debug("_load_columns: Enabling columnar access %s", self.storage_id)
self._persistent_columnar = True
else:
load = False
if load:
log.debug("LOADING COLUMNS {}".format(columns))
base_numpy = self._get_base_array()
self._hcache_arrow.load_numpy_slices([self._build_args.base_numpy],
self._base_metas,
[base_numpy],
columns,
StorageNumpy.COLUMN_MODE)
def _select_and_load_blocks(self, sliced_coord):
"""
PRE: self._is_persistent and not self._numpy_full_loaded
"""
block_coord = self._select_blocks(sliced_coord)
self._load_blocks(block_coord)
def _references_single_element(self, sliced_coord):
'''
Returns True if the 'sliced_coord' references a single element from 'self'
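For example, on a 2-D StorageNumpy, (3, 4) references a single element,
while 3 or (3, slice(None, None, None)) do not.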
'''
if isinstance(sliced_coord,tuple):
if len(sliced_coord) != self.ndim:
return False
for i in range(len(sliced_coord)):
if not isinstance(sliced_coord[i],int):
return False
return True
if isinstance(sliced_coord, int):
return self.ndim == 1
return False
def __getitem__(self, sliced_coord):
log.info("RETRIEVING NUMPY {} is_persistent {}".format(sliced_coord, self._is_persistent))
if self._is_persistent:
if not (self._numpy_full_loaded and self._references_single_element(sliced_coord)): # Optimization to avoid 'view_composer' for single accesses
# if the slice is an np.ndarray, numpy creates a copy and we do the same
if isinstance(sliced_coord, np.ndarray): # is there any other slicing case that needs a copy of the array????
result = self.view(np.ndarray)[sliced_coord] # TODO: If self is NOT loaded LOAD IT ALL BEFORE
return StorageNumpy(result) # Creates a copy (A StorageNumpy from a Numpy)
self._last_sliced_coord = sliced_coord # Remember the last getitem parameter, because it may force a new entry in the istorage at array_finalize
if not self._numpy_full_loaded:
# Use 'big_sliced_coord' to access disk and 'sliced_coord' to access memory
# Keep 'sliced_coord' to reuse the common return at the end
big_sliced_coord = self._view_composer_new(sliced_coord)
if self.is_columnar(big_sliced_coord):
columns = self._select_columns(big_sliced_coord)
if columns is not None : # Columnar access
self._load_columns(columns)
else: # Normal array access...
self._select_and_load_blocks(big_sliced_coord)
return super(StorageNumpy, self).__getitem__(sliced_coord)
def __setitem__(self, sliced_coord, values):
log.info("WRITING NUMPY")
log.debug("setitem %s", sliced_coord)
if isinstance(values, StorageNumpy) and values._is_persistent and not values._numpy_full_loaded:
values[:] # LOAD the values as the numpy.__setitem__ will only use memory
if self._is_persistent:
big_sliced_coord = self._view_composer_new(sliced_coord)
block_coords = self._select_blocks(big_sliced_coord)
if not self._numpy_full_loaded: # Load the block before writing!
self._load_blocks(block_coords)
# yolandab: execute the super first to modify the base numpy
super(StorageNumpy, self).__setitem__(sliced_coord, values)
base_numpy = self._get_base_array() # self.base is numpy.ndarray
metas = self._base_metas
self._hcache.store_numpy_slices([self._build_args.base_numpy],
metas, [base_numpy],
block_coords,
StorageNumpy.BLOCK_MODE)
return
super(StorageNumpy, self).__setitem__(sliced_coord, values)
return
def _persist_data(self, name, formato=0):
"""
Persist data to cassandra, the common attributes have been generated by IStorage.make_persistent
Args:
StorageNumpy to persist
name to use
[formato] to store the data (0-ZOrder, 2-columnar, 3-FortranOrder) # 0 ==Z_ORDER (find it at SpaceFillingCurve.h)
"""
log.debug("_persist_data: {} format={} ENTER ".format(name, formato))
if None in self or not self.ndim:
raise NotImplemented("Empty array persistance")
if not getattr(self,'_built_remotely', None):
if StorageNumpy._arrow_enabled(self._get_base_array()):
if formato == 0: # If arrow & ZORDER -> FortranOrder
formato = 3
self._create_tables_arrow(StorageNumpy.get_arrow_name(name))
self._create_tables(name)
if not getattr(self, '_hcache', None):
if StorageNumpy._arrow_enabled(self._get_base_array()):
self._hcache_arrow = self._create_hcache(StorageNumpy.get_arrow_name(name))
self._hcache = self._create_hcache(name)
log.debug("_persist_data: after create tables and cache ")
# Persist current object
hfetch_metas = HArrayMetadata(list(self.shape), list(self.strides),
self.dtype.kind, self.dtype.byteorder,
self.itemsize, self.flags.num, formato)
self._base_metas = hfetch_metas
self._build_args = self.args(self.storage_id, self._class_name, self._get_name(), hfetch_metas, self._block_id,
self.storage_id, # base_numpy is storage_id because until now we only reach this point if we are not inheriting from a StorageNumpy. We should update this if we allow StorageNumpy from volatile StorageNumpy
tuple([slice(None,None,None)]*self.ndim), #We are a view of everything
self._tokens)
if len(self.shape) != 0:
sid = self._build_args.base_numpy
log.debug("_persist_data: before store slices ROW")
if self.shape != self._get_base_array().shape:
raise NotImplementedError("Persisting a volatile view with different shape is NOT implemented")
self._hcache.store_numpy_slices([sid], self._build_args.metas, [self._get_base_array()], # CHECK: the parent's metas and the in-memory array must match
None,
StorageNumpy.BLOCK_MODE)
log.debug("_persist_data: before store slices COLUMN")
if StorageNumpy._arrow_enabled(self._get_base_array()):
self._hcache_arrow.store_numpy_slices([sid], self._build_args.metas, [self._get_base_array()], # CHECK: the parent's metas and the in-memory array must match
None,
StorageNumpy.COLUMN_MODE)
self._row_elem = self._hcache.get_elements_per_row(sid, self._build_args.metas)
self._calculate_nblocks(self._build_args.view_serialization)
log.debug("_persist_data: before store meta")
StorageNumpy._store_meta(self._build_args)
log.debug("_persist_data: before get_elements_per_row")
self._row_elem = self._hcache.get_elements_per_row(self.storage_id, self._build_args.metas)
log.debug("_persist_data: {} format={}".format(name, formato))
def make_persistent(self, name):
log.debug("Make %s persistent", name)
super().make_persistent(name)
self._persist_data(name)
def stop_persistent(self):
super().stop_persistent()
self.storage_id = None
def delete_persistent(self):
"""
Deletes the Cassandra table where the persistent StorageObj stores data
"""
self.sync() # TODO: we should discard pending writes
super().delete_persistent()
query = "DROP TABLE %s;" %(self._get_name())
query2 = "DELETE FROM hecuba.istorage WHERE storage_id = %s;" % self.storage_id
log.debug("DELETE PERSISTENT: %s", query)
config.session.execute(query)
config.session.execute(query2)
self.storage_id = None
def sync(self):
"""
Wait until all pending stores to Cassandra have been finished.
"""
log.debug("SYNC: %s", self.storage_id)
self._hcache.wait()
def __iter__(self):
if self._numpy_full_loaded:
return iter(self.view(np.ndarray))
else:
return iter(self[:].view(np.ndarray))
def __contains__(self, item):
return item in self.view(np.ndarray)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
log.debug(" UFUNC method({}) ".format(method))
log.debug(" UFUNC self sid ({}) ".format(getattr(self,'storage_id',None)))
args = []
for input_ in inputs:
log.debug(" UFUNC input loop sid={}".format(getattr(input_,'storage_id',None)))
if isinstance(input_, StorageNumpy):
StorageNumpy._preload_memory(input_)
args.append(input_.view(np.ndarray))
else:
args.append(input_)
outputs = kwargs.pop('out', None)
if outputs:
out_args = []
for output in outputs:
log.debug(" UFUNC output loop sid={}".format(getattr(output,'storage_id',None)))
if isinstance(output, StorageNumpy):
StorageNumpy._preload_memory(output)
out_args.append(output.view(np.ndarray))
else:
out_args.append(output)
kwargs['out'] = tuple(out_args)
else:
outputs = (None,) * ufunc.nout
base_numpy = self._get_base_array()
if self._is_persistent and len(self.shape) and self._numpy_full_loaded is False:
StorageNumpy._preload_memory(self)
#metas = self._base_metas
#log.debug(" UFUNC({}) load_block from {} ".format(method, metas))
#if StorageNumpy._arrow_enabled(base_numpy):
# load_method = StorageNumpy.COLUMN_MODE
# self._hcache_arrow.load_numpy_slices([self._build_args.base_numpy], metas, [base_numpy],
# None,
# load_method)
#else:
# load_method = StorageNumpy.BLOCK_MODE
# self._hcache.load_numpy_slices([self._build_args.base_numpy], metas, [base_numpy],
# None,
# load_method)
results = super(StorageNumpy, self).__array_ufunc__(ufunc, method,
*args, **kwargs)
if results is NotImplemented:
return NotImplemented
log.debug(" UFUNC: type(results)=%s results is self? %s outputs[0] is results? %s outputs[0] is self? %s", type(results), results is self, outputs[0] is results, outputs[0] is self)
if method == 'at':
return
if self._is_persistent and len(self.shape):
readonly_methods = ['mean', 'sum', 'reduce'] #methods that DO NOT modify the original memory, and there is NO NEED to store it
if method not in readonly_methods:
block_coord = self._select_blocks(self._build_args.view_serialization)
self._hcache.store_numpy_slices([self._build_args.base_numpy], self._base_metas, [base_numpy],
block_coord,
StorageNumpy.BLOCK_MODE)
if ufunc.nout == 1:
results = (results,)
results = tuple((result
if output is None else output)
for result, output in zip(results, outputs))
return results[0] if len(results) == 1 else results
def reshape(self, newshape, order=None):
'''
reshape the StorageNumpy
Creates a view of the StorageNumpy sharing data with the original data (both on disk and in memory)
'''
log.debug("reshape from %s to %s", self.shape, newshape)
if order is None:
order = 'C'
# Columnar persistent views prefer Fortran-friendly ordering; this follows
# the pattern used in __array_finalize__ (the original code called an
# undefined free function 'is_columnar(self)').
if self._build_args is not None and self.is_columnar(self._build_args.view_serialization):
order = 'A'
obj = super(StorageNumpy, self).reshape(newshape, order=order)
return obj
def transpose(self,axes=None):
'''
transpose the StorageNumpy
Creates a view of the StorageNumpy sharing data with the original data (both on disk and in memory)
'''
obj=super(StorageNumpy, self).transpose(axes)
return obj
def copy(self, order='K'):
'''
Copy a StorageNumpy: new **volatile** StorageNumpy with the data of the parameter
'''
#FIXME if self is not fully loaded... load it
n_sn=super(StorageNumpy,self).copy(order)
return n_sn
###### INTERCEPTED FUNCTIONS #####
@staticmethod
def _preload_memory(a):
"""
Load a persistent object in memory.
"""
srcA = a
if isinstance(a, StorageNumpy) and a._is_persistent and not a._numpy_full_loaded:
log.debug(" PRELOAD: sid = {} ".format(a.storage_id))
srcA = a[:] # HACK! Load ALL elements in memory NOW (recursively calls getitem)
return srcA
def dot(a, b, out=None):
srcA = StorageNumpy._preload_memory(a)
srcB = StorageNumpy._preload_memory(b)
log.debug(" DOT: AFTER PRELOAD ")
return config.intercepted['dot'](srcA,srcB,out) # At the end of this 'copy' is called
def array_equal(a, b):
srcA = StorageNumpy._preload_memory(a)
srcB = StorageNumpy._preload_memory(b)
log.debug(" array_equal: AFTER PRELOAD ")
return config.intercepted['array_equal'](srcA,srcB)
def concatenate(sn_list,axis=0, out=None):
preloaded_sn=[]
for i in range(len(sn_list)):
preloaded_sn.append(StorageNumpy._preload_memory(sn_list[i]))
log.debug(" concatenate: AFTER PRELOAD ")
return config.intercepted['concatenate'](preloaded_sn,axis, out)
|
|
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import weakref
import IECore
import Gaffer
import GafferUI
QtCore = GafferUI._qtImport( "QtCore" )
QtGui = GafferUI._qtImport( "QtGui" )
##########################################################################
# Public functions
##########################################################################
def appendMenuDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/View Local Jobs", { "command" : __showLocalDispatcherWindow } )
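# Illustrative usage sketch (typically called from an application startup
# file; the module name GafferUI.LocalDispatcherUI, the 'application'
# variable and the "/Execute" menu prefix are assumptions, not part of this
# file):
#
# scriptWindowMenu = GafferUI.ScriptWindow.menuDefinition( application )
# GafferUI.LocalDispatcherUI.appendMenuDefinitions( scriptWindowMenu, prefix="/Execute" )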
##########################################################################
# Metadata, PlugValueWidgets and Nodules
##########################################################################
Gaffer.Metadata.registerPlugDescription( Gaffer.LocalDispatcher, "executeInBackground", "Executes the dispatched tasks in separate processes via a background thread." )
Gaffer.Metadata.registerPlugDescription( Gaffer.LocalDispatcher, "ignoreScriptLoadErrors", "Ignores errors loading the script when executing in the background. This is not recommended." )
Gaffer.Metadata.registerPlugDescription( Gaffer.ExecutableNode, "dispatcher.Local.executeInForeground", "Forces the tasks from this node (and all preceding tasks) to execute on the current thread." )
##################################################################################
# Dispatcher Window
##################################################################################
class _LocalJobsPath( Gaffer.Path ) :
def __init__( self, jobPool, job = None, path = None, root = "/" ) :
Gaffer.Path.__init__( self, path = path, root = root )
self.__jobPool = jobPool
self.__job = job
def copy( self ) :
c = self.__class__( self.__jobPool, self.__job )
return c
def info( self ) :
result = Gaffer.Path.info( self )
if result is not None and self.__job is not None :
if self.__job.failed() :
result["status"] = Gaffer.LocalDispatcher.Job.Status.Failed
elif self.__job.killed() :
result["status"] = Gaffer.LocalDispatcher.Job.Status.Killed
else :
result["status"] = Gaffer.LocalDispatcher.Job.Status.Running
result["id"] = self.__job.id()
result["name"] = self.__job.name()
result["directory"] = self.__job.directory()
stats = self.__job.statistics()
result["cpu"] = "{0:.2f} %".format( stats["pcpu"] ) if "pcpu" in stats.keys() else "N/A"
result["memory"] = "{0:.2f} GB".format( stats["rss"] / 1024.0 / 1024.0 ) if "rss" in stats.keys() else "N/A"
return result
def job( self ) :
return self.__job
def jobPool( self ) :
return self.__jobPool
def isLeaf( self ) :
return len( self )
def _children( self ) :
if self.isLeaf() :
return []
result = []
jobs = self.__jobPool.jobs() + self.__jobPool.failedJobs()
for job in jobs :
result.append(
_LocalJobsPath(
jobPool = self.__jobPool,
job = job,
path = [ str(jobs.index(job)) ],
)
)
return result
class _LocalJobsWindow( GafferUI.Window ) :
def __init__( self, jobPool, **kw ) :
GafferUI.Window.__init__( self, **kw )
with self :
with GafferUI.SplitContainer() :
self.__jobListingWidget = GafferUI.PathListingWidget(
_LocalJobsPath( jobPool ),
columns = (
GafferUI.PathListingWidget.Column( infoField = "status", label = "Status", displayFunction = _LocalJobsWindow._displayStatus ),
GafferUI.PathListingWidget.Column( infoField = "name", label = "Name" ),
GafferUI.PathListingWidget.Column( infoField = "id", label = "Id" ),
GafferUI.PathListingWidget.Column( infoField = "cpu", label = "CPU" ),
GafferUI.PathListingWidget.Column( infoField = "memory", label = "Memory" ),
),
allowMultipleSelection=True
)
self.__jobListingWidget._qtWidget().header().setSortIndicator( 1, QtCore.Qt.AscendingOrder )
self.__jobSelectionChangedConnection = self.__jobListingWidget.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__jobSelectionChanged ) )
with GafferUI.TabbedContainer() as self.__tabs :
with GafferUI.ScrolledContainer( parenting = { "label" : "Details" } ) as self.__detailsTab :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=10, borderWidth=10 ) :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=15 ) :
GafferUI.Label( "<h3>Current Batch</h3>" )
self.__detailsCurrentDescription = GafferUI.Label( "N/A" )
self.__detailsCurrentDescription.setTextSelectable( True )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=15 ) :
GafferUI.Label( "<h3>Directory</h3>" )
self.__detailsDirectory = GafferUI.Label( "N/A" )
self.__detailsDirectory.setTextSelectable( True )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=10, borderWidth=10, parenting = { "label" : "Messages" } ) as self.__messagesTab :
self.__messageWidget = GafferUI.MessageWidget()
self.__tabChangedConnection = self.__tabs.currentChangedSignal().connect( Gaffer.WeakMethod( self.__tabChanged ) )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=5 ) :
self.__killButton = GafferUI.Button( "Kill Selected Jobs" )
self.__killButton.setEnabled( False )
self.__killClickedConnection = self.__killButton.clickedSignal().connect( Gaffer.WeakMethod( self.__killClicked ) )
self.__removeButton = GafferUI.Button( "Remove Failed Jobs" )
self.__removeButton.setEnabled( False )
self.__removedClickedConnection = self.__removeButton.clickedSignal().connect( Gaffer.WeakMethod( self.__removeClicked ) )
self.setTitle( "Local Dispatcher Jobs" )
self.__updateTimer = QtCore.QTimer()
self.__updateTimer.timeout.connect( Gaffer.WeakMethod( self.__update ) )
self.__visibilityChangedConnection = self.visibilityChangedSignal().connect( Gaffer.WeakMethod( self.__visibilityChanged ) )
self.__jobAddedConnection = jobPool.jobAddedSignal().connect( Gaffer.WeakMethod( self.__jobAdded ) )
self.__jobRemovedConnection = jobPool.jobRemovedSignal().connect( Gaffer.WeakMethod( self.__jobRemoved ) )
## Acquires the LocalJobsWindow for the specified job pool.
@staticmethod
def acquire( jobPool ) :
assert( isinstance( jobPool, Gaffer.LocalDispatcher.JobPool ) )
window = getattr( jobPool, "_window", None )
if window is not None and window() :
return window()
window = _LocalJobsWindow( jobPool )
jobPool._window = weakref.ref( window )
return window
@staticmethod
def _displayStatus( status ) :
if status == Gaffer.LocalDispatcher.Job.Status.Killed :
return GafferUI.Image._qtPixmapFromFile( "debugNotification.png" )
elif status == Gaffer.LocalDispatcher.Job.Status.Failed :
return GafferUI.Image._qtPixmapFromFile( "errorNotification.png" )
return GafferUI.Image._qtPixmapFromFile( "infoNotification.png" )
def __visibilityChanged( self, widget ) :
if widget.visible() :
self.__updateTimer.start( 5000 )
else :
self.__updateTimer.stop()
def __jobAdded( self, job ) :
GafferUI.EventLoop.executeOnUIThread( self.__update )
def __jobRemoved( self, job ) :
GafferUI.EventLoop.executeOnUIThread( self.__update )
def __update( self ) :
self.__jobListingWidget.getPath()._emitPathChanged()
def __updateDetails( self ) :
paths = self.__jobListingWidget.getSelectedPaths()
if not len(paths) :
self.__detailsCurrentDescription.setText( "N/A" )
self.__detailsDirectory.setText( "N/A" )
return
job = paths[0].job()
self.__detailsCurrentDescription.setText( job.description() )
self.__detailsDirectory.setText( job.directory() )
def __updateMessages( self ) :
self.__messageWidget.clear()
paths = self.__jobListingWidget.getSelectedPaths()
if not len(paths) :
return
for m in paths[0].job().messageHandler().messages :
self.__messageWidget.appendMessage( m.level, m.context, m.message )
def __killClicked( self, button ) :
for path in self.__jobListingWidget.getSelectedPaths() :
path.job().kill()
self.__update()
def __removeClicked( self, button ) :
for path in self.__jobListingWidget.getSelectedPaths() :
if path.job().failed() :
path.jobPool()._remove( path.job(), force = True )
self.__update()
def __jobSelectionChanged( self, widget ) :
paths = self.__jobListingWidget.getSelectedPaths()
numFailed = len([ x for x in paths if x.job().failed() ])
self.__removeButton.setEnabled( numFailed )
self.__killButton.setEnabled( len(paths) - numFailed > 0 )
currentTab = self.__tabs.getCurrent()
if currentTab is self.__detailsTab :
self.__updateDetails()
elif currentTab is self.__messagesTab :
self.__updateMessages()
def __tabChanged( self, tabs, currentTab ) :
if currentTab is self.__detailsTab :
self.__updateDetails()
elif currentTab is self.__messagesTab :
self.__updateMessages()
##########################################################################
# Implementation Details
##########################################################################
def __showLocalDispatcherWindow( menu ) :
window = _LocalJobsWindow.acquire( Gaffer.LocalDispatcher.defaultJobPool() )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
scriptWindow.addChildWindow( window )
window.setVisible( True )
|
|
#!/usr/bin/env python
"""
Part of the World Generator project.
author: Bret Curtis
license: LGPL v2
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2 as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
02110-1301 USA
"""
#
# Constants for world generation that are typically used across multiple modules
# and the main code itself.
#
#
# Direction
NORTH = [0, -1]
NORTH_EAST = [1, -1]
EAST = [1, 0]
SOUTH_EAST = [1, 1]
SOUTH = [0, 1]
SOUTH_WEST = [-1, 1]
WEST = [-1, 0]
NORTH_WEST = [-1, -1]
CENTER = [0, 0]
DIR_NEIGHBORS = [NORTH, EAST, SOUTH, WEST]
DIR_NEIGHBORS_CENTER = [CENTER, NORTH, EAST, SOUTH, WEST]
DIR_ALL = [NORTH, NORTH_EAST, EAST, SOUTH_EAST, SOUTH, SOUTH_WEST, WEST, NORTH_WEST]
DIR_ALL_CENTER = [CENTER, NORTH, NORTH_EAST, EAST, SOUTH_EAST, SOUTH, SOUTH_WEST, WEST, NORTH_WEST]
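# A minimal usage sketch (not part of the original module; the grid bounds are
# hypothetical): walking the four cardinal neighbours of a tile with
# DIR_NEIGHBORS while staying inside a width x height grid.
def _neighbour_coords(x, y, width, height):
    """Yield in-bounds (nx, ny) pairs for the four cardinal neighbours."""
    for dx, dy in DIR_NEIGHBORS:
        nx, ny = x + dx, y + dy
        if 0 <= nx < width and 0 <= ny < height:
            yield nx, ny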
# Heightmap methods
HM_MDA = 0
HM_DSA = 1
HM_SPH = 2
HM_PERLIN = 3
#Viewer types
VIEWER_HEIGHTMAP = 0
VIEWER_HEATMAP = 1
VIEWER_RAINFALL = 2
VIEWER_WIND = 3
VIEWER_DRAINAGE = 4
VIEWER_BIOMES = 5
VIEWER_RIVERS = 6
VIEWER_EROSION = 7
VIEWER_EROSIONAPP = 8
#Biomes
BIOME_TYPE_UNDEFINED = 0
BIOME_TYPE_WATER = 1
BIOME_TYPE_GRASSLAND = 2
BIOME_TYPE_FOREST = 3
BIOME_TYPE_DESERT_SAND = 4
BIOME_TYPE_DESERT_ROCK = 5
BIOME_TYPE_MOUNTAIN_LOW = 6
BIOME_TYPE_MOUNTAIN_HIGH = 7
BIOME_TYPE_SAVANNA = 8
BIOME_TYPE_MARSH = 9
BIOME_TYPE_SHRUBLAND = 10
BIOME_TYPE_HILLS = 11
BIOME_TYPE_SWAMP = 12
BIOME_TYPE_DESERT_BADLANDS = 13
BIOME_TYPE_MOUNTAIN = 14
# BIOME_ELEVATION
BIOME_ELEVATION_HILLS_LOW = 0.60
BIOME_ELEVATION_HILLS = 0.75
BIOME_ELEVATION_MOUNTAIN_LOW = 0.85
BIOME_ELEVATION_MOUNTAIN = 0.90
BIOME_ELEVATION_MOUNTAIN_HIGH = 0.95
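# Hedged sketch (not part of the original module): one plausible way to turn a
# normalised elevation into a coarse biome band using the thresholds above.
# The generator's real mapping may differ (BIOME_ELEVATION_HILLS_LOW is not
# used here); this only illustrates how the constants relate.
def _elevation_band(elevation):
    if elevation >= BIOME_ELEVATION_MOUNTAIN_HIGH:
        return BIOME_TYPE_MOUNTAIN_HIGH
    if elevation >= BIOME_ELEVATION_MOUNTAIN:
        return BIOME_TYPE_MOUNTAIN
    if elevation >= BIOME_ELEVATION_MOUNTAIN_LOW:
        return BIOME_TYPE_MOUNTAIN_LOW
    if elevation >= BIOME_ELEVATION_HILLS:
        return BIOME_TYPE_HILLS
    return BIOME_TYPE_UNDEFINED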
# MOVEMENT
DIAGONAL_COST = 1.41
# World constants
WGEN_HEMISPHERE_NORTH = 1
WGEN_HEMISPHERE_EQUATOR = 2
WGEN_HEMISPHERE_SOUTH = 3
#WGEN_SEA_LEVEL = 0.24
WIND_OFFSET = 180
WIND_PARITY = -1 # -1 or 1
WGEN_WIND_RESOLUTION = 4 # 1 is perfect, higher = rougher
WGEN_RAIN_FALLOFF = 0.2 # Default 0.2 - less for less rain, more for more rain
WGEN_WIND_GRAVITY = 0.975
TEMPERATURE_BAND_RESOLUTION = 2 # 1 is perfect, higher = rougher
# Colour constants
# http://df.magmawiki.com/index.php/Colour (as reference)
COLOR_BLACK = 0x000000
COLOR_CLEAR = 0x808080
COLOR_GRAY = 0x808080
COLOR_SILVER = 0xC0C0C0
COLOR_WHITE = 0xFFFFFF
COLOR_TAUPE_ROSE = 0x905D5D
COLOR_CHESTNUT = 0xCD5C5C
COLOR_MAROON = 0x800000
COLOR_RED = 0xFF0000
COLOR_VERMILION = 0xE34234
COLOR_RUSSET = 0x755A57
COLOR_SCARLET = 0xFF2400
COLOR_BURNT_UMBER = 0x8A3324
COLOR_TAUPE_MEDIUM = 0x674C47
COLOR_DARK_CHESTNUT = 0x986960
COLOR_BURNT_SIENNA = 0xE97451
COLOR_RUST = 0xB7410E
COLOR_AUBURN = 0x6F351A
COLOR_MAHOGANY = 0xC04000
COLOR_PUMPKIN = 0xFF7518
COLOR_CHOCOLATE = 0xD2691E
COLOR_TAUPE_PALE = 0xBC987E
COLOR_TAUPE_DARK = 0x483C32
COLOR_DARK_PEACH = 0xFFDAB9
COLOR_COPPER = 0xB87333
COLOR_LIGHT_BROWN = 0xCD853F
COLOR_BRONZE = 0xCD7F32
COLOR_PALE_BROWN = 0x987654
COLOR_DARK_BROWN = 0x654321
COLOR_SEPIA = 0x704214
COLOR_OCHRE = 0xCC7722
COLOR_BROWN = 0x964B00
COLOR_CINNAMON = 0x7B3F00
COLOR_TAN = 0xD2B48C
COLOR_RAW_UMBER = 0x734A12
COLOR_ORANGE = 0xFFA500
COLOR_PEACH = 0xFFE5B4
COLOR_TAUPE_SANDY = 0x967117
COLOR_GOLDENROD = 0xDAA520
COLOR_AMBER = 0xFFBF00
COLOR_DARK_TAN = 0x918151
COLOR_SAFFRON = 0xF4C430
COLOR_ECRU = 0xC2B280
COLOR_GOLD = 0xD4AF37
COLOR_PEARL = 0xF0EAD6
COLOR_BUFF = 0xF0DC82
COLOR_FLAX = 0xEEDC82
COLOR_BRASS = 0xB5A642
COLOR_GOLDEN_YELLOW = 0xFFDF00
COLOR_LEMON = 0xFDE910
COLOR_CREAM = 0xFFFDD0
COLOR_BEIGE = 0xF5F5DC
COLOR_OLIVE = 0x808000
COLOR_YELLOW = 0xFFFF00
COLOR_IVORY = 0xFFFFF0
COLOR_LIME = 0xCCFF00
COLOR_YELLOW_GREEN = 0x9ACD32
COLOR_DARK_OLIVE = 0x556832
COLOR_GREEN_YELLOW = 0xADFF2F
COLOR_CHARTREUSE = 0x7FFF00
COLOR_FERN_GREEN = 0x4F7942
COLOR_MOSS_GREEN = 0xADDFAD
COLOR_GREEN = 0x00FF00
COLOR_MINT_GREEN = 0x98FF98
COLOR_ASH_GRAY = 0xB2BEB5
COLOR_EMERALD = 0x50C878
COLOR_SEA_GREEN = 0x2E8B57
COLOR_SPRING_GREEN = 0x00FF7F
COLOR_DARK_GREEN = 0x013220
COLOR_JADE = 0x00A86B
COLOR_AQUAMARINE = 0x7FFFD4
COLOR_PINE_GREEN = 0x01796F
COLOR_TURQUOISE = 0x30D5C8
COLOR_PALE_BLUE = 0xAFEEEE
COLOR_TEAL = 0x008080
COLOR_AQUA = 0x00FFFF
COLOR_LIGHT_BLUE = 0xADD8E6
COLOR_CERULEAN = 0x007BA7
COLOR_SKY_BLUE = 0x87CEEB
COLOR_CHARCOAL = 0x36454F
COLOR_SLATE_GRAY = 0x708090
COLOR_MIDNIGHT_BLUE = 0x003366
COLOR_AZURE = 0x007FFF
COLOR_COBALT = 0x0047AB
COLOR_LAVENDER = 0xE6E6FA
COLOR_DARK_BLUE = 0x00008B
COLOR_BLUE = 0x0000FF
COLOR_PERIWINKLE = 0xCCCCFF
COLOR_DARK_VIOLET = 0x423189
COLOR_AMETHYST = 0x9966CC
COLOR_DARK_INDIGO = 0x310062
COLOR_VIOLET = 0x8B00FF
COLOR_INDIGO = 0x4B0082
COLOR_PURPLE = 0x660099
COLOR_HELIOTROPE = 0xDF73FF
COLOR_LILAC = 0xC8A2C8
COLOR_PLUM = 0x660066
COLOR_TAUPE_PURPLE = 0x50404D
COLOR_TAUPE_GRAY = 0x8B8589
COLOR_FUCHSIA = 0xF400A1
COLOR_MAUVE = 0x993366
COLOR_LAVENDER_BLUSH = 0xFFF0F5
COLOR_DARK_PINK = 0xE75480
COLOR_MAUVE_TAUPE = 0x915F6D
COLOR_DARK_SCARLET = 0x560319
COLOR_PUCE = 0xCC8899
COLOR_CRIMSON = 0xDC143C
COLOR_PINK = 0xFFC0CB
COLOR_CARDINAL = 0xC41E3A
COLOR_CARMINE = 0x960018
COLOR_PALE_PINK = 0xFADADD
COLOR_PALE_CHESTNUT = 0xDDADAF
COLOR_DEEPSEA = 0x00003F
COLOR_SEA = 0x00007F
COLOR_GRASSLAND = 0x80FF00
COLOR_HILLS = 0x5A805A
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import threading
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import tensor_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
from tensorflow.python.summary.impl import reservoir
from tensorflow.python.util import compat
namedtuple = collections.namedtuple
ScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
['wall_time', 'step',
'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',
'sum_squares', 'bucket_limit',
'bucket'])
ImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',
'encoded_image_string', 'width',
'height'])
AudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',
'encoded_audio_string', 'content_type',
'sample_rate', 'length_frames'])
## Different types of summary events handled by the event_accumulator
SUMMARY_TYPES = {'simple_value': '_ProcessScalar',
'histo': '_ProcessHistogram',
'image': '_ProcessImage',
'audio': '_ProcessAudio'}
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
AUDIO = 'audio'
SCALARS = 'scalars'
GRAPH = 'graph'
META_GRAPH = 'meta_graph'
RUN_METADATA = 'run_metadata'
## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
DEFAULT_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 500,
IMAGES: 4,
AUDIO: 4,
SCALARS: 10000,
HISTOGRAMS: 1,
}
STORE_EVERYTHING_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 0,
IMAGES: 0,
AUDIO: 0,
SCALARS: 0,
HISTOGRAMS: 0,
}
def IsTensorFlowEventsFile(path):
"""Check the path name to see if it is probably a TF Events file."""
return 'tfevents' in compat.as_str_any(os.path.basename(path))
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
data types. Then, various functional endpoints (e.g.
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
The `Reload()` method synchronously loads all of the data written so far.
Histograms, audio, and images are very large, so storing all of them is not
recommended.
@@Reload
@@Tags
@@Scalars
@@Graph
@@MetaGraph
@@RunMetadata
@@Histograms
@@CompressedHistograms
@@Images
@@Audio
"""
def __init__(self,
path,
size_guidance=DEFAULT_SIZE_GUIDANCE,
compression_bps=NORMAL_HISTOGRAM_BPS,
purge_orphaned_data=True):
"""Construct the `EventAccumulator`.
Args:
path: A file path to a directory containing tf events files, or a single
tf events file. The accumulator will load events from this path.
size_guidance: Information on how much data the EventAccumulator should
store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
so as to avoid OOMing the client. The size_guidance should be a map
from a `tagType` string to an integer representing the number of
items to keep per tag for items of that `tagType`. If the size is 0,
all events are stored.
compression_bps: Information on how the `EventAccumulator` should compress
histogram data for the `CompressedHistograms` tag (for details see
`ProcessCompressedHistogram`).
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
sizes = {}
for key in DEFAULT_SIZE_GUIDANCE:
if key in size_guidance:
sizes[key] = size_guidance[key]
else:
sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
self._first_event_timestamp = None
self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
self._graph = None
self._graph_from_metagraph = False
self._meta_graph = None
self._tagged_metadata = {}
self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
self._compressed_histograms = reservoir.Reservoir(
size=sizes[COMPRESSED_HISTOGRAMS])
self._images = reservoir.Reservoir(size=sizes[IMAGES])
self._audio = reservoir.Reservoir(size=sizes[AUDIO])
self._generator_mutex = threading.Lock()
self._generator = _GeneratorFromPath(path)
self._compression_bps = compression_bps
self.purge_orphaned_data = purge_orphaned_data
self.most_recent_step = -1
self.most_recent_wall_time = -1
self.file_version = None
# The attributes that get built up by the accumulator
self.accumulated_attrs = ('_scalars', '_histograms',
'_compressed_histograms', '_images', '_audio')
self._tensor_summaries = {}
def Reload(self):
"""Loads all events added since the last call to `Reload`.
If `Reload` was never called, loads all events in the file.
Returns:
The `EventAccumulator`.
"""
with self._generator_mutex:
for event in self._generator.Load():
self._ProcessEvent(event)
return self
def FirstEventTimestamp(self):
"""Returns the timestamp in seconds of the first event.
If the first event has been loaded (either by this method or by `Reload`),
this returns immediately. Otherwise, it will load in the first event. Note
that this means that calling `Reload` will cause this to block until
`Reload` has finished.
Returns:
The timestamp in seconds of the first event that was loaded.
Raises:
ValueError: If no events have been loaded and there were no events found
on disk.
"""
if self._first_event_timestamp is not None:
return self._first_event_timestamp
with self._generator_mutex:
try:
event = next(self._generator.Load())
self._ProcessEvent(event)
return self._first_event_timestamp
except StopIteration:
raise ValueError('No event timestamp could be found')
def _ProcessEvent(self, event):
"""Called whenever an event is loaded."""
if self._first_event_timestamp is None:
self._first_event_timestamp = event.wall_time
if event.HasField('file_version'):
new_file_version = _ParseFileVersion(event.file_version)
if self.file_version and self.file_version != new_file_version:
## This should not happen.
logging.warn(('Found new file_version for event.proto. This will '
'affect purging logic for TensorFlow restarts. '
'Old: {0} New: {1}').format(self.file_version,
new_file_version))
self.file_version = new_file_version
self._MaybePurgeOrphanedData(event)
## Process the event.
# GraphDef and MetaGraphDef are handled in a special way:
# If no graph_def Event is available, but a meta_graph_def is, and it
# contains a graph_def, then use the meta_graph_def.graph_def as our graph.
# If a graph_def Event is available, always prefer it to the graph_def
# inside the meta_graph_def.
if event.HasField('graph_def'):
if self._graph is not None:
logging.warn(('Found more than one graph event per run, or there was '
'a metagraph containing a graph_def, as well as one or '
'more graph events. Overwriting the graph with the '
'newest event.'))
self._graph = event.graph_def
self._graph_from_metagraph = False
self._UpdateTensorSummaries()
elif event.HasField('meta_graph_def'):
if self._meta_graph is not None:
logging.warn(('Found more than one metagraph event per run. '
'Overwriting the metagraph with the newest event.'))
self._meta_graph = event.meta_graph_def
if self._graph is None or self._graph_from_metagraph:
# We may have a graph_def in the metagraph. If so, and no
# graph_def is directly available, use this one instead.
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
if meta_graph.graph_def:
if self._graph is not None:
logging.warn(('Found multiple metagraphs containing graph_defs,'
'but did not find any graph events. Overwriting the '
'graph with the newest metagraph version.'))
self._graph_from_metagraph = True
self._graph = meta_graph.graph_def.SerializeToString()
self._UpdateTensorSummaries()
elif event.HasField('tagged_run_metadata'):
tag = event.tagged_run_metadata.tag
if tag in self._tagged_metadata:
logging.warn('Found more than one "run metadata" event with tag ' +
tag + '. Overwriting it with the newest event.')
self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata
elif event.HasField('summary'):
for value in event.summary.value:
if value.HasField('tensor'):
self._ProcessTensorSummary(value, event)
else:
for summary_type, summary_func in SUMMARY_TYPES.items():
if value.HasField(summary_type):
datum = getattr(value, summary_type)
getattr(self, summary_func)(value.tag, event.wall_time,
event.step, datum)
def _ProcessTensorSummary(self, value, event):
"""Process summaries generated by the TensorSummary op.
These summaries are distinguished by the fact that they have a Tensor field,
rather than one of the old idiosyncratic per-summary data fields.
Processing Tensor summaries is complicated by the fact that Tensor summaries
are not self-descriptive; you need to read the NodeDef of the corresponding
TensorSummary op to know the summary_type, the tag, etc.
This method emits ERROR-level messages to the logs if it encounters Tensor
summaries that it cannot process.
Args:
value: A summary_pb2.Summary.Value with a Tensor field.
event: The event_pb2.Event containing that value.
"""
def LogErrorOnce(msg):
logging.log_first_n(logging.ERROR, msg, 1)
name = value.node_name
if self._graph is None:
LogErrorOnce('Attempting to process TensorSummary output, but '
'no graph is present, so processing is impossible. '
'All TensorSummary output will be ignored.')
return
if name not in self._tensor_summaries:
LogErrorOnce('No node_def for TensorSummary {}; skipping this sequence.'.
format(name))
return
summary_description = self._tensor_summaries[name]
type_hint = summary_description.type_hint
if not type_hint:
LogErrorOnce('No type_hint for TensorSummary {}; skipping this sequence.'.
format(name))
return
if type_hint == 'scalar':
scalar = float(tensor_util.MakeNdarray(value.tensor))
self._ProcessScalar(name, event.wall_time, event.step, scalar)
else:
LogErrorOnce(
'Unsupported type {} for TensorSummary {}; skipping this sequence.'.
format(type_hint, name))
def _UpdateTensorSummaries(self):
g = self.Graph()
for node in g.node:
if node.op == 'TensorSummary':
d = summary.get_summary_description(node)
self._tensor_summaries[node.name] = d
def Tags(self):
"""Return all tags found in the value stream.
Returns:
A `{tagType: ['list', 'of', 'tags']}` dictionary.
"""
return {IMAGES: self._images.Keys(),
AUDIO: self._audio.Keys(),
HISTOGRAMS: self._histograms.Keys(),
SCALARS: self._scalars.Keys(),
COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
# Use a heuristic: if the metagraph is available, but
# graph is not, then we assume the metagraph contains the graph.
GRAPH: self._graph is not None,
META_GRAPH: self._meta_graph is not None,
RUN_METADATA: list(self._tagged_metadata.keys())}
def Scalars(self, tag):
"""Given a summary tag, return all associated `ScalarEvent`s.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ScalarEvent`s.
"""
return self._scalars.Items(tag)
def Graph(self):
"""Return the graph definition, if there is one.
If the graph is stored directly, return that. If no graph is stored
directly but a metagraph is stored containing a graph, return that.
Raises:
ValueError: If there is no graph for this run.
Returns:
The `graph_def` proto.
"""
graph = graph_pb2.GraphDef()
if self._graph is not None:
graph.ParseFromString(self._graph)
return graph
raise ValueError('There is no graph in this EventAccumulator')
def MetaGraph(self):
"""Return the metagraph definition, if there is one.
Raises:
ValueError: If there is no metagraph for this run.
Returns:
The `meta_graph_def` proto.
"""
if self._meta_graph is None:
raise ValueError('There is no metagraph in this EventAccumulator')
meta_graph = meta_graph_pb2.MetaGraphDef()
meta_graph.ParseFromString(self._meta_graph)
return meta_graph
def RunMetadata(self, tag):
"""Given a tag, return the associated session.run() metadata.
Args:
tag: A string tag associated with the event.
Raises:
ValueError: If the tag is not found.
Returns:
The metadata in form of `RunMetadata` proto.
"""
if tag not in self._tagged_metadata:
raise ValueError('There is no run metadata with this tag name')
run_metadata = RunMetadata()
run_metadata.ParseFromString(self._tagged_metadata[tag])
return run_metadata
def Histograms(self, tag):
"""Given a summary tag, return all associated histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `HistogramEvent`s.
"""
return self._histograms.Items(tag)
def CompressedHistograms(self, tag):
"""Given a summary tag, return all associated compressed histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `CompressedHistogramEvent`s.
"""
return self._compressed_histograms.Items(tag)
def Images(self, tag):
"""Given a summary tag, return all associated images.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `ImageEvent`s.
"""
return self._images.Items(tag)
def Audio(self, tag):
"""Given a summary tag, return all associated audio.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
Returns:
An array of `AudioEvent`s.
"""
return self._audio.Items(tag)
def _MaybePurgeOrphanedData(self, event):
"""Maybe purge orphaned data due to a TensorFlow crash.
When TensorFlow crashes at step T+O and restarts at step T, any events
written after step T are now "orphaned" and will be at best misleading if
they are included in TensorBoard.
This logic attempts to determine if there is orphaned data, and purge it
if it is found.
Args:
event: The event to use as a reference, to determine if a purge is needed.
"""
if not self.purge_orphaned_data:
return
## Check if the event happened after a crash, and purge expired tags.
if self.file_version and self.file_version >= 2:
## If the file_version is recent enough, use the SessionLog enum
## to check for restarts.
self._CheckForRestartAndMaybePurge(event)
else:
## If there is no file version, default to old logic of checking for
## out of order steps.
self._CheckForOutOfOrderStepAndMaybePurge(event)
def _CheckForRestartAndMaybePurge(self, event):
"""Check and discard expired events using SessionLog.START.
Check for a SessionLog.START event and purge all previously seen events
with larger steps, because they are out of date. Because of supervisor
threading, it is possible that this logic will cause the first few event
messages to be discarded since supervisor threading does not guarantee
that the START message is deterministically written first.
This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which
can inadvertently discard events due to supervisor threading.
Args:
event: The event to use as reference. If the event is a START event, all
previously seen events with a greater event.step will be purged.
"""
if event.HasField(
'session_log') and event.session_log.status == SessionLog.START:
self._Purge(event, by_tags=False)
def _CheckForOutOfOrderStepAndMaybePurge(self, event):
"""Check for out-of-order event.step and discard expired events for tags.
Check if the event is out of order relative to the global most recent step.
If it is, purge outdated summaries for tags that the event contains.
Args:
event: The event to use as reference. If the event is out-of-order, all
events with the same tags, but with a greater event.step will be purged.
"""
if event.step < self.most_recent_step and event.HasField('summary'):
self._Purge(event, by_tags=True)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
def _ConvertHistogramProtoToTuple(self, histo):
return HistogramValue(min=histo.min,
max=histo.max,
num=histo.num,
sum=histo.sum,
sum_squares=histo.sum_squares,
bucket_limit=list(histo.bucket_limit),
bucket=list(histo.bucket))
def _ProcessHistogram(self, tag, wall_time, step, histo):
"""Processes a proto histogram by adding it to accumulated state."""
histo = self._ConvertHistogramProtoToTuple(histo)
self._histograms.AddItem(tag, HistogramEvent(wall_time, step, histo))
self._compressed_histograms.AddItem(
tag,
CompressedHistogramEvent(
wall_time, step, _CompressHistogram(histo, self._compression_bps)))
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height)
self._images.AddItem(tag, event)
def _ProcessAudio(self, tag, wall_time, step, audio):
"""Processes a audio by adding it to accumulated state."""
event = AudioEvent(wall_time=wall_time,
step=step,
encoded_audio_string=audio.encoded_audio_string,
content_type=audio.content_type,
sample_rate=audio.sample_rate,
length_frames=audio.length_frames)
self._audio.AddItem(tag, event)
def _ProcessScalar(self, tag, wall_time, step, scalar):
"""Processes a simple value by adding it to accumulated state."""
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
self._scalars.AddItem(tag, sv)
def _Purge(self, event, by_tags):
"""Purge all events that have occurred after the given event.step.
If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
event.steps suggest that a TensorFlow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.
Discarding by tags is the safer method, when we are unsure whether a restart
has occurred, given that threading in supervisor can cause events of
different tags to arrive with unsynchronized step values.
If by_tags is False, then purge all events with event.step greater than the
given event.step. This can be used when we are certain that a TensorFlow
restart has occurred and these events can be discarded.
Args:
event: The event to use as reference for the purge. All events with
the same tags, but with a greater event.step will be purged.
by_tags: Bool to dictate whether to discard all out-of-order events or
only those that are associated with the given reference event.
"""
## Keep data in reservoirs that has a step less than event.step
_NotExpired = lambda x: x.step < event.step
if by_tags:
def _ExpiredPerTag(value):
return [getattr(self, x).FilterItems(_NotExpired, value.tag)
for x in self.accumulated_attrs]
expired_per_tags = [_ExpiredPerTag(value)
for value in event.summary.value]
expired_per_type = [sum(x) for x in zip(*expired_per_tags)]
else:
expired_per_type = [getattr(self, x).FilterItems(_NotExpired)
for x in self.accumulated_attrs]
if sum(expired_per_type) > 0:
purge_msg = _GetPurgeMessage(self.most_recent_step,
self.most_recent_wall_time, event.step,
event.wall_time, *expired_per_type)
logging.warn(purge_msg)
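# Hedged usage sketch (not part of the original module), following the public
# API described in the EventAccumulator docstring. The events path is
# caller-supplied; pass size_guidance=STORE_EVERYTHING_SIZE_GUIDANCE to keep
# every event instead of the reservoir-sampled defaults.
def _example_print_scalars(events_path):
  """Load events from `events_path` and print every scalar series."""
  accumulator = EventAccumulator(events_path)
  accumulator.Reload()
  for tag in accumulator.Tags()[SCALARS]:
    for scalar_event in accumulator.Scalars(tag):
      print('%s step=%d value=%f' % (tag, scalar_event.step,
                                     scalar_event.value))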
def _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,
event_wall_time, num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio):
"""Return the string message associated with TensorBoard purges."""
return ('Detected out of order event.step likely caused by '
'a TensorFlow restart. Purging expired events from TensorBoard'
' display between the previous step: {} (timestamp: {}) and '
'current step: {} (timestamp: {}). Removing {} scalars, {} '
'histograms, {} compressed histograms, {} images, '
'and {} audio.').format(most_recent_step, most_recent_wall_time,
event_step, event_wall_time,
num_expired_scalars, num_expired_histos,
num_expired_comp_histos, num_expired_images,
num_expired_audio)
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
if IsTensorFlowEventsFile(path):
return io_wrapper.CreateFileLoader(path)
else:
return directory_watcher.DirectoryWatcher(path, io_wrapper.CreateFileLoader,
IsTensorFlowEventsFile)
def _ParseFileVersion(file_version):
"""Convert the string file_version in event.proto into a float.
Args:
file_version: String file_version from event.proto
Returns:
Version number as a float.
"""
tokens = file_version.split('brain.Event:')
try:
return float(tokens[-1])
except ValueError:
## This should never happen according to the definition of file_version
## specified in event.proto.
logging.warn(('Invalid event.proto file_version. Defaulting to use of '
'out-of-order event.step logic for purging expired events.'))
return -1
def _CompressHistogram(histo, bps):
"""Creates fixed size histogram by adding compression to accumulated state.
This routine transforms a histogram at a particular step by linearly
interpolating its variable number of buckets to represent their cumulative
weight at a constant number of compression points. This significantly reduces
the size of the histogram and makes it suitable for a two-dimensional area
plot where the output of this routine constitutes the ranges for a single x
coordinate.
Args:
histo: A HistogramValue namedtuple.
bps: Compression points represented in basis points, 1/100ths of a percent.
Returns:
List of CompressedHistogramValue namedtuples.
"""
# See also: Histogram::Percentile() in core/lib/histogram/histogram.cc
if not histo.num:
return [CompressedHistogramValue(b, 0.0) for b in bps]
bucket = np.array(histo.bucket)
weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()
values = []
j = 0
while j < len(bps):
i = np.searchsorted(weights, bps[j], side='right')
while i < len(weights):
cumsum = weights[i]
cumsum_prev = weights[i - 1] if i > 0 else 0.0
if cumsum == cumsum_prev: # prevent remap divide by zero
i += 1
continue
if not i or not cumsum_prev:
lhs = histo.min
else:
lhs = max(histo.bucket_limit[i - 1], histo.min)
rhs = min(histo.bucket_limit[i], histo.max)
weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)
values.append(CompressedHistogramValue(bps[j], weight))
j += 1
break
else:
break
while j < len(bps):
values.append(CompressedHistogramValue(bps[j], histo.max))
j += 1
return values
def _Remap(x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)
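# Hedged worked example (not part of the original module): compressing a tiny
# two-bucket histogram down to the NORMAL_HISTOGRAM_BPS points. With the counts
# below, the cumulative weights are [5000, 10000] basis points, so every
# requested basis point is linearly interpolated inside one of the two buckets.
def _example_compress_histogram():
  histo = HistogramValue(min=0.0, max=2.0, num=4, sum=4.0, sum_squares=6.0,
                         bucket_limit=[1.0, 2.0], bucket=[2.0, 2.0])
  return _CompressHistogram(histo, NORMAL_HISTOGRAM_BPS)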
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import weakref
import semantic_version
import six
from yaql.language import specs
from yaql.language import utils
from murano.dsl import constants
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.dsl import meta as dslmeta
from murano.dsl import murano_object
from murano.dsl import murano_type
from murano.dsl import namespace_resolver
from murano.dsl import principal_objects
from murano.dsl import yaql_integration
class MuranoPackage(dsl_types.MuranoPackage, dslmeta.MetaProvider):
def __init__(self, package_loader, name, version=None,
runtime_version=None, requirements=None, meta=None):
super(MuranoPackage, self).__init__()
self._package_loader = weakref.proxy(package_loader)
self._name = name
self._version = helpers.parse_version(version)
self._runtime_version = helpers.parse_version(runtime_version)
self._requirements = {
name: semantic_version.Spec('==' + str(self._version.major))
}
if name != constants.CORE_LIBRARY:
self._requirements[constants.CORE_LIBRARY] = \
semantic_version.Spec('==0')
self._classes = {}
self._imported_types = {object, murano_object.MuranoObject}
for key, value in six.iteritems(requirements or {}):
self._requirements[key] = helpers.parse_version_spec(value)
self._load_queue = {}
self._native_load_queue = {}
if self.name == constants.CORE_LIBRARY:
principal_objects.register(self)
self._package_class = self._create_package_class()
self._meta = lambda: dslmeta.MetaData(
meta, dsl_types.MetaTargets.Package, self._package_class)
@property
def package_loader(self):
return self._package_loader
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def runtime_version(self):
return self._runtime_version
@property
def requirements(self):
return self._requirements
@property
def classes(self):
return set(self._classes.keys()).union(
self._load_queue.keys()).union(self._native_load_queue.keys())
def get_resource(self, name):
raise NotImplementedError('resource API is not implemented')
# noinspection PyMethodMayBeStatic
def get_class_config(self, name):
return {}
def _register_mpl_classes(self, data, name=None):
type_obj = self._classes.get(name)
if type_obj is not None:
return type_obj
if callable(data):
data = data()
data = helpers.list_value(data)
unnamed_class = None
last_ns = {}
for cls_data in data:
last_ns = cls_data.setdefault('Namespaces', last_ns.copy())
if len(cls_data) == 1:
continue
cls_name = cls_data.get('Name')
if not cls_name:
if unnamed_class:
raise exceptions.AmbiguousClassName(name)
unnamed_class = cls_data
else:
ns_resolver = namespace_resolver.NamespaceResolver(last_ns)
cls_name = ns_resolver.resolve_name(cls_name)
if cls_name == name:
type_obj = murano_type.create(
cls_data, self, cls_name, ns_resolver)
self._classes[name] = type_obj
else:
self._load_queue.setdefault(cls_name, cls_data)
if type_obj is None and unnamed_class:
unnamed_class['Name'] = name
return self._register_mpl_classes(unnamed_class, name)
return type_obj
def _register_native_class(self, cls, name):
if cls in self._imported_types:
return self._classes[name]
try:
m_class = self.find_class(name, False)
except exceptions.NoClassFound:
m_class = self._register_mpl_classes({'Name': name}, name)
m_class.extension_class = cls
for method_name in dir(cls):
if method_name.startswith('_'):
continue
method = getattr(cls, method_name)
if not any((
helpers.inspect_is_method(cls, method_name),
helpers.inspect_is_static(cls, method_name),
helpers.inspect_is_classmethod(cls, method_name))):
continue
method_name_alias = (getattr(
method, '__murano_name', None) or
specs.convert_function_name(
method_name, yaql_integration.CONVENTION))
m_class.add_method(method_name_alias, method, method_name)
self._imported_types.add(cls)
return m_class
def register_class(self, cls, name=None):
if inspect.isclass(cls):
name = name or getattr(cls, '__murano_name', None) or cls.__name__
if name in self._classes:
self._register_native_class(cls, name)
else:
self._native_load_queue.setdefault(name, cls)
elif isinstance(cls, dsl_types.MuranoType):
self._classes[cls.name] = cls
elif name not in self._classes:
self._load_queue[name] = cls
def find_class(self, name, search_requirements=True):
payload = self._native_load_queue.pop(name, None)
if payload is not None:
return self._register_native_class(payload, name)
payload = self._load_queue.pop(name, None)
if payload is not None:
result = self._register_mpl_classes(payload, name)
if result:
return result
result = self._classes.get(name)
if result:
return result
if search_requirements:
pkgs_for_search = []
for package_name, version_spec in six.iteritems(
self._requirements):
if package_name == self.name:
continue
referenced_package = self._package_loader.load_package(
package_name, version_spec)
try:
return referenced_package.find_class(name, False)
except exceptions.NoClassFound:
pkgs_for_search.append(referenced_package)
continue
raise exceptions.NoClassFound(
name, packages=pkgs_for_search + [self])
raise exceptions.NoClassFound(name, packages=[self])
@property
def context(self):
return None
def _create_package_class(self):
ns_resolver = namespace_resolver.NamespaceResolver(None)
return murano_type.MuranoClass(
ns_resolver, self.name, self, utils.NO_VALUE)
def get_meta(self, context):
if six.callable(self._meta):
executor = helpers.get_executor(context)
context = executor.create_package_context(self)
self._meta = self._meta().get_meta(context)
return self._meta
def __repr__(self):
return 'MuranoPackage({name})'.format(name=self.name)
|
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin makes resources that should survive an upgrade.
Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
"""
import argparse
import collections
import datetime
import os
import sys
import unittest
import yaml
import tempest.auth
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.openstack.common import timeutils
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.json import volumes_client
OPTS = {}
USERS = {}
RES = collections.defaultdict(list)
LOG = None
JAVELIN_START = datetime.datetime.utcnow()
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
_creds = tempest.auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
_auth = tempest.auth.KeystoneV2AuthProvider(_creds)
self.identity = identity_client.IdentityClientJSON(_auth)
self.servers = servers_client.ServersClientJSON(_auth)
self.objects = object_client.ObjectClient(_auth)
self.containers = container_client.ContainerClient(_auth)
self.images = image_client.ImageClientV2JSON(_auth)
self.flavors = flavors_client.FlavorsClientJSON(_auth)
self.telemetry = telemetry_client.TelemetryClientJSON(_auth)
self.volumes = volumes_client.VolumesClientJSON(_auth)
def load_resources(fname):
"""Load the expected resources from a yaml flie."""
return yaml.load(open(fname, 'r'))
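# Hedged illustration (not part of the original tool) of the resource YAML
# shape the rest of this script expects; every name and value below is made up:
#
#   tenants:
#     - javelin_tenant
#   users:
#     - name: javelin_user
#       pass: secret
#       tenant: javelin_tenant
#   objects:
#     - owner: javelin_user
#       container: javelin_container
#       name: an_object
#       file: /opt/stack/old/some_file
#   servers:
#     - name: javelin_server
#       owner: javelin_user
#       image: javelin_image
#       flavor: m1.small
#   volumes:
#     - name: javelin_volume
#       owner: javelin_user
#       gb: 1
#       device: /dev/vdb
#       server: javelin_server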
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
_, body = admin.identity.list_tenants()
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.identity.create_tenant(tenant)
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
def destroy_tenants(tenants):
admin = keystone_admin()
for tenant in tenants:
tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
r, body = admin.identity.delete_tenant(tenant_id)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user):
admin = keystone_admin()
resp, roles = admin.identity.list_roles()
role = next(r for r in roles if r['name'] == 'Member')
LOG.debug(USERS[user])
try:
admin.identity.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except exceptions.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
except exceptions.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
admin.identity.get_user_by_username(tenant['id'], u['name'])
LOG.warn("User '%s' already exists in this environment"
% u['name'])
except exceptions.NotFound:
admin.identity.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def destroy_users(users):
admin = keystone_admin()
for user in users:
tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
user_id = admin.identity.get_user_by_username(tenant_id,
user['name'])['id']
r, body = admin.identity.delete_user(user_id)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = admin.identity.get_user_by_username(tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
self.check_volumes()
self.check_telemetry()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
resp, body = client.servers.list_servers()
self.assertEqual(resp['status'], '200')
def check_objects(self):
"""Check that the objects created are still there."""
if not self.res.get('objects'):
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if not self.res.get('servers'):
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
for count in range(60):
return_code = os.system("ping -c1 " + addr)
if return_code == 0:
break
self.assertNotEqual(count, 59,
"Server %s is not pingable at %s" % (
server['name'], addr))
def check_telemetry(self):
"""Check that ceilometer provides a sane sample.
Confirm that there is at least one sample and that it has the
expected metadata.
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
if not self.res.get('telemetry'):
return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
response, body = client.telemetry.list_samples(
'instance',
query=('metadata.display_name', 'eq', server['name'])
)
self.assertEqual(response.status, 200)
self.assertTrue(len(body) >= 1, 'expecting at least one sample')
self._confirm_telemetry_sample(server, body[-1])
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if not self.res.get('volumes'):
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
vol_body,
"Couldn't find expected volume %s" % volume['name'])
# Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = client.volumes.get_attachment_from_volume(vol_body)
self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
"""Check this sample matches the expected resource metadata."""
# Confirm display_name
self.assertEqual(server['name'],
sample['resource_metadata']['display_name'])
# Confirm instance_type of flavor
flavor = sample['resource_metadata'].get(
'flavor.name',
sample['resource_metadata'].get('instance_type')
)
self.assertEqual(server['flavor'], flavor)
# Confirm the oldest sample was created before upgrade.
if OPTS.mode == 'check':
oldest_timestamp = timeutils.normalize_time(
timeutils.parse_isotime(sample['timestamp']))
self.assertTrue(
oldest_timestamp < JAVELIN_START,
'timestamp should come before start of second javelin run'
)
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
_assign_swift_role(obj['owner'])
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
def destroy_objects(objects):
for obj in objects:
client = client_for_user(obj['owner'])
r, body = client.objects.delete_object(obj['container'], obj['name'])
if not (200 <= int(r['status']) < 299):
raise ValueError("unable to destroy object: [%s] %s" % (r, body))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def _get_image_by_name(client, name):
r, body = client.images.image_list()
for image in body:
if name == image['name']:
return image
return None
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# only upload a new image if the name isn't there
if _get_image_by_name(client, image['name']):
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
r, aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
r, ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
r, body = client.images.create_image(
image['name'], image['format'], image['format'], **extras)
image_id = body.get('id')
client.images.store_image(image_id, open(fname, 'r'))
def destroy_images(images):
if not images:
return
LOG.info("Destroying images")
for image in images:
client = client_for_user(image['owner'])
response = _get_image_by_name(client, image['name'])
if not response:
LOG.info("Image '%s' does not exists" % image['name'])
continue
client.images.delete_image(response['id'])
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
r, body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_flavor_by_name(client, name):
r, body = client.flavors.list_flavors()
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
resp, body = client.servers.create_server(server['name'], image_id,
flavor_id)
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
client.servers.delete_server(response['id'])
client.servers.wait_for_server_termination(response['id'],
ignore_error=True)
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
for volume in body:
if name == volume['display_name']:
return volume
return None
def create_volumes(volumes):
if not volumes:
return
LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
# only create a volume if the name isn't already there
if _get_volume_by_name(client, volume['name']):
LOG.info("volume '%s' already exists" % volume['name'])
continue
size = volume['gb']
v_name = volume['name']
resp, body = client.volumes.create_volume(size=size,
display_name=v_name)
client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
client.volumes.detach_volume(volume_id)
client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
volume_id = _get_volume_by_name(client, volume['name'])['id']
device = volume['device']
client.volumes.attach_volume(volume_id, server_id, device)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for those.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
create_volumes(RES['volumes'])
attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
destroy_servers(RES['servers'])
destroy_images(RES['images'])
destroy_objects(RES['objects'])
destroy_volumes(RES['volumes'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
LOG.warn("Destroy mode incomplete")
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
def setup_logging():
global LOG
logging.setup(__name__)
LOG = logging.getLogger(__name__)
def main():
global RES
get_options()
setup_logging()
RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
#!/usr/bin/env python
"""Email motion-detected JPEG image.
This module captures and emails a motion-detected JPEG image from a Raspberry
Pi with a camera module. The image capture part is credited to the excellent
post https://www.raspberrypi.org/forums/viewtopic.php?t=45235, by brainflakes.
Read brainflakes' original post for the algorithm. I have removed the force
capture part for this script.
The design of this software is very similar to that of
https://github.com/syncom/twitimg-rpi. The only difference is that this tool
uses email instead of twitter as the transport layer, which is useful in
countries where twitter access is blocked.
"""
import StringIO
import subprocess
import os
import sys
import time
import argparse
from datetime import datetime
from PIL import Image
import smtplib
import shlex
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
# Tested for @outlook.com and @sina.com emails
smtp_server = ''
smtp_port = ''
username = ''
password = ''
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.config')
def send_email_with_image(img_filepath, subject):
[smtp_server, smtp_port, username, password] = get_config_info()
from_address = username
to_address = username
image_data = open(img_filepath, 'rb').read()
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = from_address
msg['To'] = to_address
text = MIMEText(subject)
msg.attach(text)
image = MIMEImage(image_data, name=os.path.basename(img_filepath))
msg.attach(image)
s = smtplib.SMTP(smtp_server, smtp_port, timeout=45)
s.ehlo()
s.starttls()
s.ehlo()
s.login(username, password)
s.sendmail(from_address, to_address, msg.as_string())
s.quit()
def get_config_info():
''' Obtain SMTP server info from file .config
Returns list
'''
f = open(config_file, 'rb')
c = f.read()
t = c.splitlines()
return t[0:4]
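# Hedged illustration (not part of the original script): the .config file next
# to this script is expected to hold exactly these four lines, in this order.
# The values below are placeholders.
#
#   smtp.example.com
#   587
#   someone@example.com
#   an-app-password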
def get_mtime_str(file):
''' Obtain file modification time string.
Return str
'''
try:
mtime = os.path.getmtime(file)
except OSError:
return ''
is_dst = time.daylight and time.localtime().tm_isdst > 0
# UTC offset in seconds
offset = - (time.altzone if is_dst else time.timezone)
time_str = time.ctime(mtime) + ' UTC ' + str(offset/3600)
return time_str
def do_email(img_filepath):
'''Email the image: modification time as subject, image as attachment.
'''
subject = get_mtime_str(img_filepath)
if not subject:
print "Something has gone wrong. No email was sent."
else:
send_email_with_image(img_filepath, subject)
print "Emailed image taken at " + subject
# Motion detection and imaging part
#
# Motion detection settings:
# - threshold: how much a pixel has to change by to be marked as "changed"
# - sensitivity: how many changed pixels before capturing an image
threshold = 10
sensitivity = 729
test_width = 100
test_height = 75
# File settings
save_width = 1280
save_height = 960
reserve_diskspace = 40 * 1024 * 1024 # Keep 40 MB free on disk
# Capture a small bitmap test image, for motion detection
def captureTestImage():
command = "raspistill -n -w %s -h %s -t 1000 -e bmp -o -" % (test_width, test_height)
output = None
image_data = StringIO.StringIO()
try:
output = subprocess.check_output(shlex.split(command), shell=False)
except subprocess.CalledProcessError:
print "Command exited with non-zero code. No output."
return None, None
if output:
image_data.write(output)
image_data.seek(0)
im = Image.open(image_data)
buffer = im.load()
image_data.close()
return im, buffer
# Save a full size image to disk
def saveImage(width, height, dirname, diskSpaceToReserve):
keepDiskSpaceFree(dirname, diskSpaceToReserve)
time = datetime.now()
filename = "motion-%04d%02d%02d-%02d%02d%02d.jpg" % (time.year, time.month, time.day, time.hour, time.minute, time.second)
command = "raspistill -n -w %s -h %s -t 10 -e jpg -q 15 -o %s/%s" % (width, height, dirname.rstrip('/'), filename)
try:
subprocess.call(shlex.split(command), shell=False)
except subprocess.CalledProcessError:
print "Command exited with non-zero code. No file captured."
return None
print "Captured %s/%s" % (dirname.rstrip('/'), filename)
return dirname.rstrip('/') + '/' + filename
# Keep free space above given level
def keepDiskSpaceFree(dirname, bytesToReserve):
if (getFreeSpace(dirname) < bytesToReserve):
for filename in sorted(os.listdir(dirname)):
if filename.startswith("motion") and filename.endswith(".jpg"):
os.remove(dirname.rstrip('/') +"/" + filename)
print "Deleted %s/%s to avoid filling disk" % ( dirname.rstrip('/'), filename )
if (getFreeSpace(dirname) > bytesToReserve):
return
return
# Get available disk space
def getFreeSpace(dir):
st = os.statvfs(dir)
du = st.f_bavail * st.f_frsize
return du
# Where work happens
def do_email_motion(dirname):
# Get first image
captured1 = False
while (not captured1):
image1, buffer1 = captureTestImage()
if image1:
captured1 = True
# Time-granule for wait in the case of error/exception
basic_wait = 300
# Backoff multiplier; doubled on error/exception, reset after a successful email
mult = 1
while (True):
# Get comparison image
captured2 = False
while (not captured2):
image2, buffer2 = captureTestImage()
if image2:
captured2 = True
# Count changed pixels
changedPixels = 0
for x in xrange(0, test_width):
for y in xrange(0, test_height):
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
if pixdiff > threshold:
changedPixels += 1
# Save an image if pixels changed
if changedPixels > sensitivity:
fpath = saveImage(save_width, save_height, dirname, reserve_diskspace)
            # Email saved image
if fpath:
try:
do_email(fpath)
# reset multiplicity
mult = 1
except Exception, e:
print("Email might not have been sent. Encountered exception, as follows: ")
print(e)
                    sleep = mult * basic_wait
                    print("Retrying in {0} seconds".format(sleep))
                    time.sleep(sleep)  # Back off before the next attempt
                    mult = mult * 2
# Swap comparison buffers
image1 = image2
buffer1 = buffer2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("dir_path")
args = parser.parse_args()
do_email_motion(args.dir_path)
|
|
import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSoftmaxCrossEntropy(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
self.t = numpy.random.randint(0, 3, (4,)).astype(numpy.int32)
def check_forward(self, x_data, t_data, use_cudnn=True):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.softmax_cross_entropy(x, t, use_cudnn)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
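        # The reference loss below is softmax cross entropy averaged over the
        # rows whose label is not -1 (ignored), i.e. the mean of
        # -log(exp(x[t]) / sum_j exp(x[j])).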
y = numpy.exp(self.x)
loss_expect = 0.0
count = 0
for i in six.moves.range(y.shape[0]):
if self.t[i] == -1:
continue
loss_expect -= math.log(y[i, self.t[i]] / y[i].sum())
count += 1
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
self.assertAlmostEqual(loss_expect, loss_value, places=5)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.cudnn
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
def check_backward(self, x_data, t_data, use_cudnn=True):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.softmax_cross_entropy(x, t, use_cudnn)
loss.backward()
self.assertEqual(None, t.grad)
func = loss.creator
f = lambda: func.forward((x.data, t.data))
gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.02)
gradient_check.assert_allclose(gx, x.grad, atol=1e-4)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.t)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
class TestReplicatedSoftmaxCrossEntropy1(TestSoftmaxCrossEntropy):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
self.t = numpy.random.randint(0, 3, (4, 2)).astype(numpy.int32)
def check_forward(self, x_data, t_data, use_cudnn=True):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.softmax_cross_entropy(
x, t, use_cudnn, normalize=True)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
y = numpy.exp(self.x)
loss_expect = 0.0
count = 0
for i in six.moves.range(y.shape[0]):
for k in six.moves.range(y.shape[2]):
if self.t[i, k] == -1:
continue
loss_expect -= math.log(
y[i, self.t[i, k], k] / y[i, :, k].sum())
count += 1
if count == 0:
loss_expect = 0.0
else:
loss_expect /= count
self.assertAlmostEqual(loss_expect, loss_value, places=4)
class TestReplicatedSoftmaxCrossEntropy2(TestSoftmaxCrossEntropy):
def setUp(self):
self.x = numpy.random.uniform(
-1, 1, (4, 3, 2, 5)).astype(numpy.float32)
self.t = numpy.random.randint(0, 3, (4, 2, 5)).astype(numpy.int32)
def check_forward(self, x_data, t_data, use_cudnn=True):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.softmax_cross_entropy(
x, t, use_cudnn, normalize=False)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
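        # With normalize=False the reference loss sums over all non-ignored
        # positions but divides by the batch size (y.shape[0]) rather than by
        # the number of non-ignored labels.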
y = numpy.exp(self.x)
loss_expect = 0.0
for i in six.moves.range(y.shape[0]):
for k in six.moves.range(y.shape[2]):
for l in six.moves.range(y.shape[3]):
if self.t[i, k, l] == -1:
continue
loss_expect -= math.log(
y[i, self.t[i, k, l], k, l] / y[i, :, k, l].sum())
loss_expect /= y.shape[0]
self.assertAlmostEqual(loss_expect, loss_value, places=4)
class TestSoftmaxCrossEntropyWithIgnoreLabel(TestSoftmaxCrossEntropy):
def setUp(self):
super(TestSoftmaxCrossEntropyWithIgnoreLabel, self).setUp()
self.t[2] = -1
class TestSoftmaxCrossEntropyIgnoreAll(TestSoftmaxCrossEntropy):
def setUp(self):
super(TestSoftmaxCrossEntropyIgnoreAll, self).setUp()
self.t[:] = -1
class TestReplicatedSoftmaxCrossEntropy1IgnoreLabel(
TestReplicatedSoftmaxCrossEntropy1):
def setUp(self):
super(TestReplicatedSoftmaxCrossEntropy1IgnoreLabel, self).setUp()
self.t[0, 1] = -1
class TestReplicatedSoftmaxCrossEntropy2IgnoreLabel(
TestReplicatedSoftmaxCrossEntropy2):
def setUp(self):
super(TestReplicatedSoftmaxCrossEntropy2IgnoreLabel, self).setUp()
self.t[0, 1, 2] = -1
class TestReplicatedSoftmaxCrossEntropy1IgnoreAll(
TestReplicatedSoftmaxCrossEntropy1):
def setUp(self):
super(TestReplicatedSoftmaxCrossEntropy1IgnoreAll, self).setUp()
self.t[:] = -1
class TestReplicatedSoftmaxCrossEntropy2IgnoreAll(
TestReplicatedSoftmaxCrossEntropy2):
def setUp(self):
super(TestReplicatedSoftmaxCrossEntropy2IgnoreAll, self).setUp()
self.t[:] = -1
testing.run_module(__name__, __file__)
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for firebase_admin.db module."""
import collections
import json
import os
import pytest
import firebase_admin
from firebase_admin import db
from firebase_admin import exceptions
from integration import conftest
from tests import testutils
def integration_conf(request):
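    """Return a (credential, project_id) pair for the integration tests; when
    FIREBASE_DATABASE_EMULATOR_HOST is set, no credential is loaded and a fake
    project id is used."""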
host_override = os.environ.get('FIREBASE_DATABASE_EMULATOR_HOST')
if host_override:
return None, 'fake-project-id'
return conftest.integration_conf(request)
@pytest.fixture(scope='module')
def app(request):
cred, project_id = integration_conf(request)
ops = {
'databaseURL' : 'https://{0}.firebaseio.com'.format(project_id),
}
return firebase_admin.initialize_app(cred, ops, name='integration-db')
@pytest.fixture(scope='module', autouse=True)
def default_app():
# Overwrites the default_app fixture in conftest.py.
# This test suite should not use the default app. Use the app fixture instead.
pass
@pytest.fixture(scope='module')
def update_rules(app):
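    """Deploys the index rules from dinosaurs_index.json needed by the query
    tests, writing to /.settings/rules.json only when the deployed rules differ."""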
with open(testutils.resource_filename('dinosaurs_index.json')) as rules_file:
new_rules = json.load(rules_file)
client = db.reference('', app)._client
rules = client.body('get', '/.settings/rules.json', params='format=strict')
existing = rules.get('rules')
if existing != new_rules:
rules['rules'] = new_rules
client.request('put', '/.settings/rules.json', json=rules)
@pytest.fixture(scope='module')
def testdata():
with open(testutils.resource_filename('dinosaurs.json')) as dino_file:
return json.load(dino_file)
@pytest.fixture(scope='module')
def testref(update_rules, testdata, app):
"""Adds the necessary DB indices, and sets the initial values.
This fixture is attached to the module scope, and therefore is guaranteed to run only once
during the execution of this test module.
Returns:
Reference: A reference to the test dinosaur database.
"""
del update_rules
ref = db.reference('_adminsdk/python/dinodb', app)
ref.set(testdata)
return ref
class TestReferenceAttributes:
"""Test cases for attributes exposed by db.Reference class."""
def test_ref_attributes(self, testref):
assert testref.key == 'dinodb'
assert testref.path == '/_adminsdk/python/dinodb'
def test_child(self, testref):
child = testref.child('dinosaurs')
assert child.key == 'dinosaurs'
assert child.path == '/_adminsdk/python/dinodb/dinosaurs'
def test_parent(self, testref):
parent = testref.parent
assert parent.key == 'python'
assert parent.path == '/_adminsdk/python'
class TestReadOperations:
"""Test cases for reading node values."""
def test_get_value(self, testref, testdata):
value = testref.get()
assert isinstance(value, dict)
assert testdata == value
def test_get_value_and_etag(self, testref, testdata):
value, etag = testref.get(etag=True)
assert isinstance(value, dict)
assert testdata == value
assert isinstance(etag, str)
def test_get_shallow(self, testref):
value = testref.get(shallow=True)
assert isinstance(value, dict)
assert value == {'dinosaurs': True, 'scores': True}
def test_get_if_changed(self, testref, testdata):
success, data, etag = testref.get_if_changed('wrong_etag')
assert success is True
assert data == testdata
assert isinstance(etag, str)
assert testref.get_if_changed(etag) == (False, None, None)
def test_get_child_value(self, testref, testdata):
child = testref.child('dinosaurs')
assert child is not None
value = child.get()
assert isinstance(value, dict)
assert testdata['dinosaurs'] == value
def test_get_grandchild_value(self, testref, testdata):
value = testref.child('dinosaurs').child('lambeosaurus').get()
assert isinstance(value, dict)
assert testdata['dinosaurs']['lambeosaurus'] == value
def test_get_nonexisting_child_value(self, testref):
assert testref.child('none_existing').get() is None
class TestWriteOperations:
"""Test cases for creating and updating node values."""
def test_push(self, testref):
python = testref.parent
ref = python.child('users').push()
assert ref.path == '/_adminsdk/python/users/' + ref.key
assert ref.get() == ''
def test_push_with_value(self, testref):
python = testref.parent
value = {'name' : 'Luis Alvarez', 'since' : 1911}
ref = python.child('users').push(value)
assert ref.path == '/_adminsdk/python/users/' + ref.key
assert ref.get() == value
def test_set_primitive_value(self, testref):
python = testref.parent
ref = python.child('users').push()
ref.set('value')
assert ref.get() == 'value'
def test_set_complex_value(self, testref):
python = testref.parent
value = {'name' : 'Mary Anning', 'since' : 1799}
ref = python.child('users').push()
ref.set(value)
assert ref.get() == value
def test_update_children(self, testref):
python = testref.parent
value = {'name' : 'Robert Bakker', 'since' : 1945}
ref = python.child('users').push()
ref.update(value)
assert ref.get() == value
def test_update_children_with_existing_values(self, testref):
python = testref.parent
value = {'name' : 'Edwin Colbert', 'since' : 1900, 'temp': True}
ref = python.child('users').push(value)
ref.update({'since' : 1905})
value['since'] = 1905
assert ref.get() == value
ref.update({'temp': None})
del value['temp']
assert ref.get() == value
def test_update_nested_children(self, testref):
python = testref.parent
edward = python.child('users').push({'name' : 'Edward Cope', 'since' : 1800})
jack = python.child('users').push({'name' : 'Jack Horner', 'since' : 1940})
delta = {
'{0}/since'.format(edward.key) : 1840,
'{0}/since'.format(jack.key) : 1946
}
python.child('users').update(delta)
assert edward.get() == {'name' : 'Edward Cope', 'since' : 1840}
assert jack.get() == {'name' : 'Jack Horner', 'since' : 1946}
def test_set_if_unchanged(self, testref):
python = testref.parent
push_data = {'name' : 'Edward Cope', 'since' : 1800}
edward = python.child('users').push(push_data)
update_data = {'name' : 'Jack Horner', 'since' : 1940}
success, data, etag = edward.set_if_unchanged('invalid-etag', update_data)
assert success is False
assert data == push_data
assert isinstance(etag, str)
success, data, new_etag = edward.set_if_unchanged(etag, update_data)
assert success is True
assert data == update_data
assert new_etag != etag
def test_transaction(self, testref):
python = testref.parent
def transaction_update(snapshot):
snapshot['name'] += ' Owen'
snapshot['since'] = 1804
return snapshot
ref = python.child('users').push({'name' : 'Richard'})
new_value = ref.transaction(transaction_update)
expected = {'name': 'Richard Owen', 'since': 1804}
assert new_value == expected
assert ref.get() == expected
def test_transaction_scalar(self, testref):
python = testref.parent
ref = python.child('users/count')
ref.set(42)
new_value = ref.transaction(lambda x: x + 1 if x else 1)
expected = 43
assert new_value == expected
assert ref.get() == expected
def test_delete(self, testref):
python = testref.parent
ref = python.child('users').push('foo')
assert ref.get() == 'foo'
ref.delete()
assert ref.get() is None
class TestAdvancedQueries:
"""Test cases for advanced interactions via the db.Query interface."""
height_sorted = [
'linhenykus', 'pterodactyl', 'lambeosaurus',
'triceratops', 'stegosaurus', 'bruhathkayosaurus',
]
def test_order_by_key(self, testref):
value = testref.child('dinosaurs').order_by_key().get()
assert isinstance(value, collections.OrderedDict)
assert list(value.keys()) == [
'bruhathkayosaurus', 'lambeosaurus', 'linhenykus',
'pterodactyl', 'stegosaurus', 'triceratops'
]
def test_order_by_value(self, testref):
value = testref.child('scores').order_by_value().get()
assert list(value.keys()) == [
'stegosaurus', 'lambeosaurus', 'triceratops',
'bruhathkayosaurus', 'linhenykus', 'pterodactyl',
]
def test_order_by_child(self, testref):
value = testref.child('dinosaurs').order_by_child('height').get()
assert list(value.keys()) == self.height_sorted
def test_limit_first(self, testref):
value = testref.child('dinosaurs').order_by_child('height').limit_to_first(2).get()
assert list(value.keys()) == self.height_sorted[:2]
def test_limit_first_all(self, testref):
value = testref.child('dinosaurs').order_by_child('height').limit_to_first(10).get()
assert list(value.keys()) == self.height_sorted
def test_limit_last(self, testref):
value = testref.child('dinosaurs').order_by_child('height').limit_to_last(2).get()
assert list(value.keys()) == self.height_sorted[-2:]
def test_limit_last_all(self, testref):
value = testref.child('dinosaurs').order_by_child('height').limit_to_last(10).get()
assert list(value.keys()) == self.height_sorted
def test_start_at(self, testref):
value = testref.child('dinosaurs').order_by_child('height').start_at(3.5).get()
assert list(value.keys()) == self.height_sorted[-2:]
def test_end_at(self, testref):
value = testref.child('dinosaurs').order_by_child('height').end_at(3.5).get()
assert list(value.keys()) == self.height_sorted[:4]
def test_start_and_end_at(self, testref):
value = testref.child('dinosaurs').order_by_child('height') \
.start_at(2.5).end_at(5).get()
assert list(value.keys()) == self.height_sorted[-3:-1]
def test_equal_to(self, testref):
value = testref.child('dinosaurs').order_by_child('height').equal_to(0.6).get()
assert list(value.keys()) == self.height_sorted[:2]
def test_order_by_nested_child(self, testref):
value = testref.child('dinosaurs').order_by_child('ratings/pos').start_at(4).get()
assert len(value) == 3
assert 'pterodactyl' in value
assert 'stegosaurus' in value
assert 'triceratops' in value
def test_filter_by_key(self, testref):
value = testref.child('dinosaurs').order_by_key().limit_to_first(2).get()
assert len(value) == 2
assert 'bruhathkayosaurus' in value
assert 'lambeosaurus' in value
def test_filter_by_value(self, testref):
value = testref.child('scores').order_by_value().limit_to_last(2).get()
assert len(value) == 2
assert 'pterodactyl' in value
assert 'linhenykus' in value
@pytest.fixture(scope='module')
def override_app(request, update_rules):
del update_rules
cred, project_id = integration_conf(request)
ops = {
'databaseURL' : 'https://{0}.firebaseio.com'.format(project_id),
'databaseAuthVariableOverride' : {'uid' : 'user1'}
}
app = firebase_admin.initialize_app(cred, ops, 'db-override')
yield app
firebase_admin.delete_app(app)
@pytest.fixture(scope='module')
def none_override_app(request, update_rules):
del update_rules
cred, project_id = integration_conf(request)
ops = {
'databaseURL' : 'https://{0}.firebaseio.com'.format(project_id),
'databaseAuthVariableOverride' : None
}
app = firebase_admin.initialize_app(cred, ops, 'db-none-override')
yield app
firebase_admin.delete_app(app)
class TestAuthVariableOverride:
"""Test cases for database auth variable overrides."""
def init_ref(self, path, app):
admin_ref = db.reference(path, app)
admin_ref.set('test')
assert admin_ref.get() == 'test'
def test_no_access(self, app, override_app):
path = '_adminsdk/python/admin'
self.init_ref(path, app)
user_ref = db.reference(path, override_app)
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
assert user_ref.get()
assert str(excinfo.value) == 'Permission denied'
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
user_ref.set('test2')
assert str(excinfo.value) == 'Permission denied'
def test_read(self, app, override_app):
path = '_adminsdk/python/protected/user2'
self.init_ref(path, app)
user_ref = db.reference(path, override_app)
assert user_ref.get() == 'test'
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
user_ref.set('test2')
assert str(excinfo.value) == 'Permission denied'
def test_read_write(self, app, override_app):
path = '_adminsdk/python/protected/user1'
self.init_ref(path, app)
user_ref = db.reference(path, override_app)
assert user_ref.get() == 'test'
user_ref.set('test2')
assert user_ref.get() == 'test2'
def test_query(self, override_app):
user_ref = db.reference('_adminsdk/python/protected', override_app)
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
user_ref.order_by_key().limit_to_first(2).get()
assert str(excinfo.value) == 'Permission denied'
def test_none_auth_override(self, app, none_override_app):
path = '_adminsdk/python/public'
self.init_ref(path, app)
public_ref = db.reference(path, none_override_app)
assert public_ref.get() == 'test'
ref = db.reference('_adminsdk/python', none_override_app)
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
assert ref.child('protected/user1').get()
assert str(excinfo.value) == 'Permission denied'
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
assert ref.child('protected/user2').get()
assert str(excinfo.value) == 'Permission denied'
with pytest.raises(exceptions.UnauthenticatedError) as excinfo:
assert ref.child('admin').get()
assert str(excinfo.value) == 'Permission denied'
|
|
from google_time_string import GoogleTimeString
from datetime import datetime, timedelta
from google_analytics_models import GoogleAnalyticsVisitors, GoogleAnalyticsReferralsModel, GoogleAnalyticsUserModel, db, \
GoogleAnalyticsSignups, GoogleAnalyticsReturningVisitors, GoogleAnalyticsExperimentVariation, GoogleAnalyticsExperiment
from google_analytics_client import GoogleAnalyticsAPI
import json
from users.user_model import User
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def mine_visits(username=None):
if not username:
ga_users = GoogleAnalyticsUserModel.query.all()
else:
ga_users = [GoogleAnalyticsUserModel.query.filter_by(username=username).first()]
ga_users[0].active = True
db.session.add(ga_users[0])
db.session.commit()
if ga_users:
for ga_user in ga_users:
#try:
ga = Google_Analytics_User_Querier(username=ga_user.username)
#get the latest visit data
#ga.get_new_user_visit_data()
#ga.get_referral_data()
#ga.get_new_user_funnel_data()
#print '%s ga data mined' %(ga_user.username)
#except:
# print 'exception mining %s ga data' %(ga_user.username)
ga.get_ga_data()
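# A minimal sketch of calling mine_visits above (the username is illustrative):
#
#   mine_visits()                      # mine GA data for every connected user
#   mine_visits('[email protected]')   # mine a single user and mark them active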
class Google_Analytics_User_Querier:
"""
    Used to make leanworkbench-specific queries to the Google Analytics API
"""
def __init__(self, username):
self.username = username
self.model = GoogleAnalyticsAPI(username)
self.profile_id = self.model.credentials.profile_id
self.account_id = self.model.credentials.account_id
self.webproperty_id = self.model.credentials.webproperty_id
def get_referral_data(self):
# check to see if mined before
mined = GoogleAnalyticsReferralsModel.query.filter_by(username=self.username).all()
if mined:
# just mine yesterday
days_back = 2
else:
# go a year back
days_back = 366
date = datetime.now()
# go backwards in time up to a year
for backwards_days in range(1,days_back):
try:
date = date - timedelta(days=1)
# google string formatted date
google_date = GoogleTimeString(str(date))
g = GoogleAnalyticsAPI(self.username)
referral_data = g.client.data().ga().get(
ids='ga:' + self.profile_id,
start_date=str(google_date),
end_date=str(google_date),
sort="ga:sessions",
dimensions='ga:source,ga:medium',
metrics='ga:sessions,ga:pageviews,ga:sessionDuration,ga:exits').execute()
column_headers = referral_data.get('columnHeaders')
rows = referral_data.get('rows')
for row in rows:
source, medium, sessions, pageviews, session_duration, exits = row
referral_model = GoogleAnalyticsReferralsModel(username=self.username, date = date, source= source, medium=medium, sessions=sessions, pageviews=pageviews, session_duration=session_duration, exits = exits)
db.session.add(referral_model)
db.session.commit()
except Exception,e: print str(e)
def get_new_user_visit_data(self):
"""
        Get all available visit data for a user who just connected their Google
        Analytics account; if visitor data already exists, fetch only yesterday's data.
"""
# start at yesterday
date = datetime.now()-timedelta(days=1)
# google string formatted date
google_date = GoogleTimeString(str(date))
g = GoogleAnalyticsAPI(self.username)
user_visitor_data = GoogleAnalyticsVisitors.query.filter_by(username=self.username).all()
# if already mined, just do yesterday
if user_visitor_data:
date = datetime.now()-timedelta(days=1)
google_date = GoogleTimeString(str(date))
visitor_data = g.client.data().ga().get(
ids='ga:' + self.profile_id,
start_date=str(google_date),
end_date=str(google_date),
dimensions='ga:visitorType',
metrics='ga:visits').execute()
# save visitor data to database
self.process_visitor_data(visitor_data,date)
# if first time mining GA data for user
else:
# go backwards in time up to a year
for backwards_days in range(1,366):
# create google query object
# get visitors and visitor types for the day
#try:
visitor_data = g.client.data().ga().get(
ids='ga:' + self.profile_id,
start_date=str(google_date),
end_date=str(google_date),
dimensions='ga:visitorType',
metrics='ga:visits').execute()
# parse and save visitor data to database
self.process_visitor_data(visitor_data, date)
date = date - timedelta(days=1)
google_date = GoogleTimeString(str(date))
def process_visitor_data(self,visitor_data, date):
"""
Takes the result of a Google Analytics API Client query for visitors, parses, and saves to database
"""
# if there were any visitors that day, rows key will exist
visitors_type = visitor_data.get('rows')
# set default 0 values in case no visitors
total_visitors, new_visits, returning_visitors = 0,0,0
# if visitors
if visitors_type:
for visitor_list in visitors_type:
if visitor_list[0] == "New Visitor":
new_visits = int(visitor_list[1])
if visitor_list[0] == "Returning Visitor":
returning_visitors = int(visitor_list[1])
total_visitors = new_visits + returning_visitors
try:
                percent_new_visits = float(new_visits) / total_visitors  # float() avoids Python 2 integer truncation
except:
percent_new_visits=0
# add data to model
visitors_data_model = GoogleAnalyticsVisitors(
username=self.username,
profile_id=self.profile_id,
date=str(date),
visitors=total_visitors,
percent_new_visits=percent_new_visits,
new_visits=new_visits
)
# save visitor data to database
db.session.add(visitors_data_model)
db.session.commit()
def get_new_user_funnel_data(self):
"""
Get all the funnels data available for a user who just connected their Google Analytics account
args:
username: username/email of the user whose account it is
"""
# start at yesterday
date = datetime.now()-timedelta(days=1)
# google string formatted date
google_date = GoogleTimeString(str(date))
# go backwards in time up to a year
for backwards_days in range(1,366):
# create google query object
g = GoogleAnalyticsAPI(self.username)
page_path_data = g.client.data().mcf().get(
ids='ga:' + self.profile_id,
start_date='2012-01-01',
end_date=str(google_date),
metrics='mcf:totalConversions,mcf:totalConversionValue').execute()
print json.dumps(page_path_data)
def get_ga_data(self):
this_user = User.query.filter_by(email=self.username).first()
user_data = GoogleAnalyticsReturningVisitors.query.filter_by(username=self.username).all()
if user_data:
days_back = 2
else:
days_back = 366
# start at yesterday
date = datetime.now()-timedelta(days=1)
# google string formatted date
google_date = GoogleTimeString(str(date))
# go backwards in time up to a year
for backwards_days in range(1,days_back):
# create google query object
g = GoogleAnalyticsAPI(self.username)
try:
signup_data = g.client.management().goals().get(
accountId=self.account_id,
profileId=self.profile_id,
goalId='1',
webPropertyId=self.webproperty_id).execute()
new_sd = GoogleAnalyticsSignups(
username=self.username,
date = date,
signups = signup_data['value']
)
db.session.add(new_sd)
except:
print '%s ga signup data error' %(self.username)
#try:
returning_visitor_data = g.client.data().ga().get(
ids='ga:' + self.profile_id,
start_date=str(google_date),
end_date=str(google_date),
dimensions='ga:userType',
metrics='ga:sessions').execute()
if returning_visitor_data.get('totalResults') != 0:
print json.dumps(returning_visitor_data)
rows = returning_visitor_data.get('rows')
try:
returning_visitors = int(rows[1][1])
new_visitors = int(rows[0][1])
all_visitors = new_visitors + returning_visitors
except:
pass
else:
returning_visitors = 0
new_visitors = 0
all_visitors = 0
new_rvd = GoogleAnalyticsReturningVisitors(
username=self.username,
all_visitors = all_visitors,
returning_visitors = returning_visitors
)
this_user.returning_visitors.append(new_rvd)
db.session.add(new_rvd)
db.session.add(this_user)
db.session.commit()
#except:
# print '%s ga returning visitor data error' %(self.username)
experiments = g.client.management().experiments().list(
accountId=self.account_id,
webPropertyId=self.webproperty_id,
profileId=self.profile_id).execute()
user_experiment_ids = [x.experiment_id for x in GoogleAnalyticsExperiment.query.filter_by(username=self.username).all()]
for experiment in experiments.get('items', []):
experiment_id = experiment.get('id')
status = experiment.get('status')
winner_found = experiment.get('winnerFound')
start_time = experiment.get('created')
start_time = datetime.strptime(start_time, DATETIME_FORMAT)
duration = experiment.get('minimumExperimentLengthInDays')
                end_time = start_time + timedelta(days=duration)
if experiment_id not in user_experiment_ids:
experiment_model= GoogleAnalyticsExperiment(status=status, winner_found=winner_found, start_time=start_time, end_time=end_time, experiment_id=experiment_id, username = self.username)
else:
experiment_model = GoogleAnalyticsExperiment.query.filter_by(experiment_id=experiment_id).first()
experiment_model.status = status
experiment_model.winner_found = winner_found
db.session.add(experiment_model)
db.session.commit()
experiment_variations = [x for x in experiment_model.variations]
experiment_variation_names = [x.name for x in experiment_variations]
for variation in experiment.get('variations', []):
print variation
name = variation.get('name')
url = variation.get('url')
status = variation.get('status')
weight= variation.get('weight')
won= variation.get('won')
if name in experiment_variation_names:
location = experiment_variation_names.index(name)
variation_model = experiment_variations[location]
variation_model.won = won
variation_model.status = status
else:
print 'new variation'
variation_model = GoogleAnalyticsExperimentVariation(url=url,status=status,weight=weight,name=name,won=won)
experiment_model.variations.append(variation_model)
db.session.add(experiment_model)
db.session.commit()
date = date - timedelta(days=1)
google_date = GoogleTimeString(str(date))
|
|
"""
Module for abstract serializer/unserializer base classes.
"""
from django.db import models
from django.utils import six
class SerializerDoesNotExist(KeyError):
"""The requested serializer was not found."""
pass
class SerializationError(Exception):
"""Something bad happened during serialization."""
pass
class DeserializationError(Exception):
"""Something bad happened during deserialization."""
@classmethod
def WithData(cls, original_exc, model, fk, field_value):
"""
Factory method for creating a deserialization error which has a more
explanatory message.
"""
return cls("%s: (%s:pk=%s) field_value was '%s'" % (original_exc, model, fk, field_value))
class ProgressBar(object):
progress_width = 75
def __init__(self, output, total_count):
self.output = output
self.total_count = total_count
self.prev_done = 0
def update(self, count):
if not self.output:
return
perc = count * 100 // self.total_count
done = perc * self.progress_width // 100
if self.prev_done >= done:
return
self.prev_done = done
cr = '' if self.total_count == 1 else '\r'
self.output.write(cr + '[' + '.' * done + ' ' * (self.progress_width - done) + ']')
if done == self.progress_width:
self.output.write('\n')
self.output.flush()
class Serializer(object):
"""
Abstract serializer base class.
"""
# Indicates if the implemented serializer is only available for
# internal Django use.
internal_use_only = False
progress_class = ProgressBar
stream_class = six.StringIO
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.pop("stream", self.stream_class())
self.selected_fields = options.pop("fields", None)
self.use_natural_foreign_keys = options.pop('use_natural_foreign_keys', False)
self.use_natural_primary_keys = options.pop('use_natural_primary_keys', False)
progress_bar = self.progress_class(
options.pop('progress_output', None), options.pop('object_count', 0)
)
self.start_serialization()
self.first = True
for count, obj in enumerate(queryset, start=1):
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's _meta
# This is to avoid local_fields problems for proxy models. Refs #17717.
concrete_model = obj._meta.concrete_model
for field in concrete_model._meta.local_fields:
if field.serialize:
if field.remote_field is None:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_field(obj, field)
else:
if self.field_is_selected(field) and self.output_pk_field(obj, field):
self.handle_fk_field(obj, field)
for field in concrete_model._meta.many_to_many:
if field.serialize:
if self.selected_fields is None or field.attname in self.selected_fields:
self.handle_m2m_field(obj, field)
self.end_object(obj)
progress_bar.update(count)
if self.first:
self.first = False
self.end_serialization()
return self.getvalue()
def field_is_selected(self, field):
return self.selected_fields is None or field.attname[:-3] in self.selected_fields
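    # Note: for foreign keys, field.attname ends in "_id"; stripping the last
    # three characters above compares the bare field name against selected_fields.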
def output_pk_field(self, obj, pk_field):
return self.use_natural_primary_keys or pk_field != obj._meta.pk
def start_serialization(self):
"""
Called when serializing of the queryset starts.
"""
raise NotImplementedError('subclasses of Serializer must provide a start_serialization() method')
def end_serialization(self):
"""
Called when serializing of the queryset ends.
"""
pass
def start_object(self, obj):
"""
Called when serializing of an object starts.
"""
raise NotImplementedError('subclasses of Serializer must provide a start_object() method')
def end_object(self, obj):
"""
Called when serializing of an object ends.
"""
pass
def handle_field(self, obj, field):
"""
Called to handle each individual (non-relational) field on an object.
"""
        raise NotImplementedError('subclasses of Serializer must provide a handle_field() method')
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey field.
"""
        raise NotImplementedError('subclasses of Serializer must provide a handle_fk_field() method')
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField.
"""
        raise NotImplementedError('subclasses of Serializer must provide a handle_m2m_field() method')
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream is
not seekable).
"""
if callable(getattr(self.stream, 'getvalue', None)):
return self.stream.getvalue()
class Deserializer(six.Iterator):
"""
Abstract base deserializer class.
"""
def __init__(self, stream_or_string, **options):
"""
Init this serializer given a stream or a string
"""
self.options = options
if isinstance(stream_or_string, six.string_types):
self.stream = six.StringIO(stream_or_string)
else:
self.stream = stream_or_string
def __iter__(self):
return self
def __next__(self):
"""Iteration iterface -- return the next item in the stream"""
raise NotImplementedError('subclasses of Deserializer must provide a __next__() method')
class DeserializedObject(object):
"""
A deserialized model.
Basically a container for holding the pre-saved deserialized data along
with the many-to-many data saved with the object.
Call ``save()`` to save the object (with the many-to-many data) to the
database; call ``save(save_m2m=False)`` to save just the object fields
(and not touch the many-to-many stuff.)
"""
def __init__(self, obj, m2m_data=None):
self.object = obj
self.m2m_data = m2m_data
def __repr__(self):
return "<%s: %s(pk=%s)>" % (
self.__class__.__name__,
self.object._meta.label,
self.object.pk,
)
def save(self, save_m2m=True, using=None, **kwargs):
# Call save on the Model baseclass directly. This bypasses any
# model-defined save. The save is also forced to be raw.
# raw=True is passed to any pre/post_save signals.
models.Model.save_base(self.object, using=using, raw=True, **kwargs)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
getattr(self.object, accessor_name).set(object_list)
# prevent a second (possibly accidental) call to save() from saving
# the m2m data twice.
self.m2m_data = None
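# Sketch of how DeserializedObject instances are typically consumed; the
# deserializer class name below is illustrative, not part of this module:
#
#   for deserialized in SomeDeserializer(stream):
#       deserialized.save()   # persists the object and its many-to-many data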
def build_instance(Model, data, db):
"""
Build a model instance.
If the model instance doesn't have a primary key and the model supports
natural keys, try to retrieve it from the database.
"""
obj = Model(**data)
if (obj.pk is None and hasattr(Model, 'natural_key') and
hasattr(Model._default_manager, 'get_by_natural_key')):
natural_key = obj.natural_key()
try:
obj.pk = Model._default_manager.db_manager(db).get_by_natural_key(*natural_key).pk
except Model.DoesNotExist:
pass
return obj
|
|
import time
from weakref import ref as weakref
from redis import StrictRedis
from redis.exceptions import ConnectionError, TimeoutError
from rb.promise import Promise
from rb.poll import poll
def assert_open(client):
if client.closed:
raise ValueError('I/O operation on closed file')
class CommandBuffer(object):
"""The command buffer is an internal construct """
def __init__(self, host_id, connection):
self.host_id = host_id
self.connection = connection
self.commands = []
self.last_command_sent = 0
self.last_command_received = 0
# Ensure we're connected. Without this, we won't have a socket
# we can select over.
connection.connect()
@property
def closed(self):
"""Indicates if the command buffer is closed."""
return self.connection is None or self.connection._sock is None
def fileno(self):
"""Returns the file number of the underlying connection's socket
to be able to select over it.
"""
assert_open(self)
return self.connection._sock.fileno()
def enqueue_command(self, command_name, args):
"""Enqueue a new command into this pipeline."""
assert_open(self)
promise = Promise()
self.commands.append((command_name, args, promise))
return promise
def send_pending_requests(self):
"""Sends all pending requests into the connection."""
assert_open(self)
unsent_commands = self.commands[self.last_command_sent:]
if not unsent_commands:
return
all_cmds = self.connection.pack_commands(
[(x[0],) + tuple(x[1]) for x in unsent_commands])
self.connection.send_packed_command(all_cmds)
self.last_command_sent += len(unsent_commands)
def wait_for_responses(self, client):
"""Waits for all responses to come back and resolves the
eventual results.
"""
assert_open(self)
pending = self.last_command_sent - self.last_command_received
if pending <= 0:
return
for idx in xrange(pending):
real_idx = self.last_command_received + idx
command_name, _, promise = self.commands[real_idx]
value = client.parse_response(
self.connection, command_name)
promise.resolve(value)
        self.last_command_received += pending
class RoutingPool(object):
"""The routing pool works together with the routing client to
internally dispatch through the cluster's router to the correct
internal connection pool.
"""
def __init__(self, cluster):
self.cluster = cluster
def get_connection(self, command_name, shard_hint=None):
host_id = shard_hint
if host_id is None:
raise RuntimeError('The routing pool requires the host id '
'as shard hint')
real_pool = self.cluster.get_pool_for_host(host_id)
con = real_pool.get_connection(command_name)
con.__creating_pool = weakref(real_pool)
return con
def release(self, connection):
# The real pool is referenced by the connection through an
# internal weakref. If the weakref is broken it means the
# pool is already gone and we do not need to release the
# connection.
try:
real_pool = connection.__creating_pool()
except (AttributeError, TypeError):
real_pool = None
if real_pool is not None:
real_pool.release(connection)
def disconnect(self):
self.cluster.disconnect_pools()
def reset(self):
pass
class BaseClient(StrictRedis):
pass
class RoutingBaseClient(BaseClient):
def pubsub(self, **kwargs):
raise NotImplementedError('Pubsub is unsupported.')
def pipeline(self, transaction=True, shard_hint=None):
raise NotImplementedError('Manual pipelines are unsupported. rb '
'automatically pipelines commands.')
def lock(self, *args, **kwargs):
raise NotImplementedError('Locking is not supported.')
class MappingClient(RoutingBaseClient):
"""The routing client uses the cluster's router to target an individual
node automatically based on the key of the redis command executed.
"""
def __init__(self, connection_pool, max_concurrency=None):
RoutingBaseClient.__init__(self, connection_pool=connection_pool)
# careful. If you introduce any other variables here, then make
# sure that FanoutClient.target still works correctly!
self._max_concurrency = max_concurrency
self._command_buffer_poll = poll()
# Standard redis methods
def execute_command(self, *args):
router = self.connection_pool.cluster.get_router()
host_id = router.get_host_for_command(args[0], args[1:])
buf = self._get_command_buffer(host_id, args[0])
return buf.enqueue_command(args[0], args[1:])
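    # Note: unlike a plain StrictRedis client, execute_command here returns an
    # rb Promise that is resolved once the buffered response has been read
    # (for example after join()), rather than the response value itself.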
# Custom Internal API
def _get_command_buffer(self, host_id, command_name):
"""Returns the command buffer for the given command and arguments."""
buf = self._command_buffer_poll.get(host_id)
if buf is not None:
return buf
while len(self._command_buffer_poll) >= self._max_concurrency:
self._try_to_clear_outstanding_requests()
connection = self.connection_pool.get_connection(
command_name, shard_hint=host_id)
buf = CommandBuffer(host_id, connection)
self._command_buffer_poll.register(host_id, buf)
return buf
def _release_command_buffer(self, command_buffer):
"""This is called by the command buffer when it closes."""
if command_buffer.closed:
return
self._command_buffer_poll.unregister(command_buffer.host_id)
self.connection_pool.release(command_buffer.connection)
command_buffer.connection = None
def _try_to_clear_outstanding_requests(self, timeout=1.0):
"""Tries to clear some outstanding requests in the given timeout
to reduce the concurrency pressure.
"""
if not self._command_buffer_poll:
return
for command_buffer in self._command_buffer_poll:
command_buffer.send_pending_requests()
for command_buffer in self._command_buffer_poll.poll(timeout):
command_buffer.wait_for_responses(self)
self._release_command_buffer(command_buffer)
# Custom Public API
def join(self, timeout=None):
"""Waits for all outstanding responses to come back or the timeout
to be hit.
"""
remaining = timeout
for command_buffer in self._command_buffer_poll:
command_buffer.send_pending_requests()
while self._command_buffer_poll and (remaining is None or
remaining > 0):
now = time.time()
rv = self._command_buffer_poll.poll(remaining)
if remaining is not None:
remaining -= (time.time() - now)
for command_buffer in rv:
command_buffer.wait_for_responses(self)
self._release_command_buffer(command_buffer)
def cancel(self):
"""Cancels all outstanding requests."""
for command_buffer in self._command_buffer_poll:
self._release_command_buffer(command_buffer)
class FanoutClient(MappingClient):
"""This works similar to the :class:`MappingClient` but instead of
using the router to target hosts, it sends the commands to all manually
specified hosts.
The results are accumulated in a dictionary keyed by the `host_id`.
"""
def __init__(self, hosts, connection_pool, max_concurrency=None):
MappingClient.__init__(self, connection_pool, max_concurrency)
self._target_hosts = hosts
self.__is_retargeted = False
def target(self, hosts):
"""Temporarily retarget the client for one call. This is useful
when having to deal with a subset of hosts for one call.
"""
if self.__is_retargeted:
raise TypeError('Cannot use target more than once.')
rv = FanoutClient(hosts, connection_pool=self.connection_pool,
max_concurrency=self._max_concurrency)
rv._command_buffer_poll = self._command_buffer_poll
rv._target_hosts = hosts
rv.__is_retargeted = True
return rv
def execute_command(self, *args):
promises = {}
hosts = self._target_hosts
if hosts == 'all':
hosts = self.connection_pool.cluster.hosts.keys()
elif hosts is None:
raise RuntimeError('Fanout client was not targeted to hosts.')
for host_id in hosts:
buf = self._get_command_buffer(host_id, args[0])
promises[host_id] = buf.enqueue_command(args[0], args[1:])
return Promise.all(promises)
class RoutingClient(RoutingBaseClient):
"""A client that can route to individual targets."""
def __init__(self, cluster):
RoutingBaseClient.__init__(self, connection_pool=RoutingPool(cluster))
# Standard redis methods
def execute_command(self, *args, **options):
pool = self.connection_pool
command_name = args[0]
command_args = args[1:]
router = self.connection_pool.cluster.get_router()
host_id = router.get_host_for_command(command_name, command_args)
connection = pool.get_connection(command_name, shard_hint=host_id)
try:
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
except (ConnectionError, TimeoutError) as e:
connection.disconnect()
if not connection.retry_on_timeout and isinstance(e, TimeoutError):
raise
connection.send_command(*args)
return self.parse_response(connection, command_name, **options)
finally:
pool.release(connection)
# Custom Public API
def get_mapping_client(self, max_concurrency=64):
"""Returns a thread unsafe mapping client. This client works
similar to a redis pipeline and returns eventual result objects.
It needs to be joined on to work properly. Instead of using this
        directly you should use the :meth:`map` context manager which
automatically joins.
Returns an instance of :class:`MappingClient`.
"""
return MappingClient(connection_pool=self.connection_pool,
max_concurrency=max_concurrency)
def get_fanout_client(self, hosts, max_concurrency=64):
return FanoutClient(hosts, connection_pool=self.connection_pool,
max_concurrency=max_concurrency)
def map(self, timeout=None, max_concurrency=64):
"""Returns a context manager for a map operation. This runs
multiple queries in parallel and then joins in the end to collect
all results.
In the context manager the client available is a
:class:`MappingClient`. Example usage::
results = {}
with cluster.map() as client:
for key in keys_to_fetch:
results[key] = client.get(key)
for key, promise in results.iteritems():
print '%s => %s' % (key, promise.value)
"""
return MapManager(self.get_mapping_client(max_concurrency),
timeout=timeout)
def fanout(self, hosts=None, timeout=None, max_concurrency=64):
"""Returns a context manager for a map operation that fans out to
manually specified hosts instead of using the routing system. This
can for instance be used to empty the database on all hosts. The
context manager returns a :class:`FanoutClient`. Example usage::
with cluster.fanout(hosts=[0, 1, 2, 3]) as client:
results = client.info()
for host_id, info in results.value.iteritems():
print '%s -> %s' % (host_id, info['is'])
The promise returned accumulates all results in a dictionary keyed
by the `host_id`.
The `hosts` parameter is a list of `host_id`\s or alternatively the
string ``'all'`` to send the commands to all hosts.
        The fanout API needs to be used with a lot of care as it can cause
a lot of damage when keys are written to hosts that do not expect
them.
"""
return MapManager(self.get_fanout_client(hosts, max_concurrency),
timeout=timeout)
class LocalClient(BaseClient):
"""The local client is just a convenient method to target one specific
host.
"""
def __init__(self, cluster, connection_pool=None, **kwargs):
if connection_pool is None:
raise TypeError('The local client needs a connection pool')
BaseClient.__init__(self, cluster, connection_pool=connection_pool,
**kwargs)
class MapManager(object):
"""Helps with mapping."""
def __init__(self, mapping_client, timeout):
self.mapping_client = mapping_client
self.timeout = timeout
self.entered = None
def __enter__(self):
self.entered = time.time()
return self.mapping_client
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
self.mapping_client.cancel()
else:
timeout = self.timeout
if timeout is not None:
                timeout = max(1, timeout - (time.time() - self.entered))
self.mapping_client.join(timeout=timeout)
|
|
"""FlatDict is a dict object that allows for single level, delimited
key/value pair mapping of nested dictionaries.
"""
try:
from collections.abc import MutableMapping
except ImportError: # pragma: nocover
from collections import MutableMapping
import sys
__version__ = '4.0.1'
NO_DEFAULT = object()
class FlatDict(MutableMapping):
""":class:`~flatdict.FlatDict` is a dictionary object that allows for
single level, delimited key/value pair mapping of nested dictionaries.
The default delimiter value is ``:`` but can be changed in the constructor
or by calling :meth:`FlatDict.set_delimiter`.
"""
_COERCE = dict
def __init__(self, value=None, delimiter=':', dict_class=dict):
super(FlatDict, self).__init__()
self._values = dict_class()
self._delimiter = delimiter
self.update(value)
def __contains__(self, key):
"""Check to see if the key exists, checking for both delimited and
not delimited key values.
:param mixed key: The key to check for
"""
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
return pk in self._values and ck in self._values[pk]
return key in self._values
def __delitem__(self, key):
"""Delete the item for the specified key, automatically dealing with
nested children.
:param mixed key: The key to use
:raises: KeyError
"""
if key not in self:
raise KeyError
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
del self._values[pk][ck]
if not self._values[pk]:
del self._values[pk]
else:
del self._values[key]
def __eq__(self, other):
"""Check for equality against the other value
:param other: The value to compare
:type other: FlatDict
:rtype: bool
:raises: TypeError
"""
if isinstance(other, dict):
return self.as_dict() == other
elif not isinstance(other, self.__class__):
raise TypeError
return self.as_dict() == other.as_dict()
def __ne__(self, other):
"""Check for inequality against the other value
:param other: The value to compare
:type other: dict or FlatDict
:rtype: bool
"""
return not self.__eq__(other)
def __getitem__(self, key):
"""Get an item for the specified key, automatically dealing with
nested children.
:param mixed key: The key to use
:rtype: mixed
:raises: KeyError
"""
values = self._values
key = [key] if isinstance(key, int) else key.split(self._delimiter)
for part in key:
values = values[part]
return values
def __iter__(self):
"""Iterate over the flat dictionary key and values
:rtype: Iterator
:raises: RuntimeError
"""
return iter(self.keys())
def __len__(self):
"""Return the number of items.
:rtype: int
"""
return len(self.keys())
def __reduce__(self):
"""Return state information for pickling
:rtype: tuple
"""
return type(self), (self.as_dict(), self._delimiter)
def __repr__(self):
"""Return the string representation of the instance.
:rtype: str
"""
return '<{} id={} {}>"'.format(self.__class__.__name__, id(self),
str(self))
def __setitem__(self, key, value):
"""Assign the value to the key, dynamically building nested
FlatDict items where appropriate.
:param mixed key: The key for the item
:param mixed value: The value for the item
:raises: TypeError
"""
if isinstance(value, self._COERCE) and not isinstance(value, FlatDict):
value = self.__class__(value, self._delimiter)
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
if pk not in self._values:
self._values[pk] = self.__class__({ck: value}, self._delimiter)
return
elif not isinstance(self._values[pk], FlatDict):
raise TypeError(
'Assignment to invalid type for key {}'.format(pk))
self._values[pk][ck] = value
else:
self._values[key] = value
def __str__(self):
"""Return the string value of the instance.
:rtype: str
"""
return '{{{}}}'.format(', '.join(
['{!r}: {!r}'.format(k, self[k]) for k in self.keys()]))
def as_dict(self):
"""Return the :class:`~flatdict.FlatDict` as a :class:`dict`
:rtype: dict
"""
out = dict({})
for key in self.keys():
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
if self._has_delimiter(ck):
ck = ck.split(self._delimiter, 1)[0]
if isinstance(self._values[pk], FlatDict) and pk not in out:
out[pk] = {}
if isinstance(self._values[pk][ck], FlatDict):
out[pk][ck] = self._values[pk][ck].as_dict()
else:
out[pk][ck] = self._values[pk][ck]
else:
out[key] = self._values[key]
return out
def clear(self):
"""Remove all items from the flat dictionary."""
self._values.clear()
def copy(self):
"""Return a shallow copy of the flat dictionary.
:rtype: flatdict.FlatDict
"""
return self.__class__(self.as_dict(), delimiter=self._delimiter)
def get(self, key, d=None):
"""Return the value for key if key is in the flat dictionary, else
default. If default is not given, it defaults to ``None``, so that this
method never raises :exc:`KeyError`.
:param mixed key: The key to get
:param mixed d: The default value
:rtype: mixed
"""
try:
return self.__getitem__(key)
except KeyError:
return d
def items(self):
"""Return a copy of the flat dictionary's list of ``(key, value)``
pairs.
.. note:: CPython implementation detail: Keys and values are listed in
an arbitrary order which is non-random, varies across Python
implementations, and depends on the flat dictionary's history of
insertions and deletions.
:rtype: list
"""
return [(k, self.__getitem__(k)) for k in self.keys()]
def iteritems(self):
"""Return an iterator over the flat dictionary's (key, value) pairs.
See the note for :meth:`flatdict.FlatDict.items`.
Using ``iteritems()`` while adding or deleting entries in the flat
dictionary may raise :exc:`RuntimeError` or fail to iterate over all
entries.
:rtype: Iterator
:raises: RuntimeError
"""
for item in self.items():
yield item
def iterkeys(self):
"""Iterate over the flat dictionary's keys. See the note for
:meth:`flatdict.FlatDict.items`.
Using ``iterkeys()`` while adding or deleting entries in the flat
dictionary may raise :exc:`RuntimeError` or fail to iterate over all
entries.
:rtype: Iterator
:raises: RuntimeError
"""
for key in self.keys():
yield key
def itervalues(self):
"""Return an iterator over the flat dictionary's values. See the note
:meth:`flatdict.FlatDict.items`.
Using ``itervalues()`` while adding or deleting entries in the flat
dictionary may raise a :exc:`RuntimeError` or fail to iterate over all
entries.
:rtype: Iterator
:raises: RuntimeError
"""
for value in self.values():
yield value
def keys(self):
"""Return a copy of the flat dictionary's list of keys.
See the note for :meth:`flatdict.FlatDict.items`.
:rtype: list
"""
keys = []
for key, value in self._values.items():
if isinstance(value, (FlatDict, dict)):
nested = [
self._delimiter.join([str(key), str(k)])
for k in value.keys()]
keys += nested if nested else [key]
else:
keys.append(key)
return keys
def pop(self, key, default=NO_DEFAULT):
"""If key is in the flat dictionary, remove it and return its value,
else return default. If default is not given and key is not in the
dictionary, :exc:`KeyError` is raised.
:param mixed key: The key name
:param mixed default: The default value
:rtype: mixed
"""
if key not in self and default != NO_DEFAULT:
return default
value = self[key]
self.__delitem__(key)
return value
def setdefault(self, key, default):
"""If key is in the flat dictionary, return its value. If not,
insert key with a value of default and return default.
default defaults to ``None``.
:param mixed key: The key name
:param mixed default: The default value
:rtype: mixed
"""
if key not in self:
self.__setitem__(key, default)
return self.__getitem__(key)
def set_delimiter(self, delimiter):
"""Override the default or passed in delimiter with a new value. If
the requested delimiter already exists in a key, a :exc:`ValueError`
will be raised.
:param str delimiter: The delimiter to use
:raises: ValueError
"""
for key in self.keys():
if delimiter in key:
                raise ValueError('Key {!r} collides with delimiter {!r}'.format(
                    key, delimiter))
self._delimiter = delimiter
for key in self._values.keys():
if isinstance(self._values[key], FlatDict):
self._values[key].set_delimiter(delimiter)
def update(self, other=None, **kwargs):
"""Update the flat dictionary with the key/value pairs from other,
overwriting existing keys.
``update()`` accepts either another flat dictionary object or an
iterable of key/value pairs (as tuples or other iterables of length
two). If keyword arguments are specified, the flat dictionary is then
updated with those key/value pairs: ``d.update(red=1, blue=2)``.
:param iterable other: Iterable of key, value pairs
:rtype: None
"""
[self.__setitem__(k, v) for k, v in dict(other or kwargs).items()]
def values(self):
"""Return a copy of the flat dictionary's list of values. See the note
for :meth:`flatdict.FlatDict.items`.
:rtype: list
"""
return [self.__getitem__(k) for k in self.keys()]
def _has_delimiter(self, key):
"""Checks to see if the key contains the delimiter.
:rtype: bool
"""
return isinstance(key, str) and self._delimiter in key
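# Brief usage sketch of FlatDict above (the values are illustrative):
#
#   d = FlatDict({'foo': {'bar': 1}})
#   d['foo:bar']        # -> 1
#   d['foo:baz'] = 2    # nested assignment through the delimiter
#   d.as_dict()         # -> {'foo': {'bar': 1, 'baz': 2}}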
class FlatterDict(FlatDict):
"""Like :class:`~flatdict.FlatDict` but also coerces lists and sets
to child-dict instances with the offset as the key. Alternative to
the implementation added in v1.2 of FlatDict.
"""
_COERCE = list, tuple, set, dict, FlatDict
_ARRAYS = list, set, tuple
def __init__(self, value=None, delimiter=':', dict_class=dict):
self.original_type = type(value)
if self.original_type in self._ARRAYS:
value = {str(i): v for i, v in enumerate(value)}
super(FlatterDict, self).__init__(value, delimiter, dict_class)
def __setitem__(self, key, value):
"""Assign the value to the key, dynamically building nested
FlatDict items where appropriate.
:param mixed key: The key for the item
:param mixed value: The value for the item
:raises: TypeError
"""
if isinstance(value, self._COERCE) and \
not isinstance(value, FlatterDict):
value = self.__class__(value, self._delimiter)
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
if pk not in self._values:
self._values[pk] = self.__class__({ck: value}, self._delimiter)
return
if getattr(self._values[pk], 'original_type',
None) in self._ARRAYS:
try:
k, cck = ck.split(self._delimiter, 1)
int(k)
except ValueError:
raise TypeError(
'Assignment to invalid type for key {}{}{}'.format(
pk, self._delimiter, ck))
self._values[pk][k][cck] = value
return
elif not isinstance(self._values[pk], FlatterDict):
raise TypeError(
'Assignment to invalid type for key {}'.format(pk))
self._values[pk][ck] = value
else:
self._values[key] = value
def as_dict(self):
"""Return the :class:`~flatdict.FlatterDict` as a nested
:class:`dict`.
:rtype: dict
"""
out = {}
for key in self.keys():
if self._has_delimiter(key):
pk, ck = key.split(self._delimiter, 1)
if self._has_delimiter(ck):
ck = ck.split(self._delimiter, 1)[0]
if isinstance(self._values[pk], FlatterDict) and pk not in out:
if self._values[pk].original_type == tuple:
out[pk] = tuple(self._child_as_list(pk))
elif self._values[pk].original_type == list:
out[pk] = self._child_as_list(pk)
elif self._values[pk].original_type == set:
out[pk] = set(self._child_as_list(pk))
elif self._values[pk].original_type == dict:
out[pk] = self._values[pk].as_dict()
else:
if isinstance(self._values[key], FlatterDict):
out[key] = self._values[key].original_type()
else:
out[key] = self._values[key]
return out
def _child_as_list(self, pk, ck=None):
"""Returns a list of values from the child FlatterDict instance
with string based integer keys.
:param str pk: The parent key
:param str ck: The child key, optional
:rtype: list
"""
if ck is None:
subset = self._values[pk]
else:
subset = self._values[pk][ck]
# Check if keys has delimiter, which implies deeply nested dict
keys = subset.keys()
if any(self._has_delimiter(k) for k in keys):
out = []
split_keys = {k.split(self._delimiter)[0] for k in keys}
for k in sorted(split_keys, key=lambda x: int(x)):
if subset[k].original_type == tuple:
out.append(tuple(self._child_as_list(pk, k)))
elif subset[k].original_type == list:
out.append(self._child_as_list(pk, k))
elif subset[k].original_type == set:
out.append(set(self._child_as_list(pk, k)))
elif subset[k].original_type == dict:
out.append(subset[k].as_dict())
return out
        # Python versions prior to 3.6 do not guarantee dict insertion order;
        # remove this branch once Python 3.5 reaches end of life (2020-09-13).
if sys.version_info[0:2] < (3, 6): # pragma: nocover
return [subset[k] for k in sorted(keys, key=lambda x: int(x))]
else:
return [subset[k] for k in keys]
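# A minimal usage sketch (an editorial addition, not part of the library) showing
# how FlatterDict is expected to flatten nested containers and convert them back:
#
#     d = FlatterDict({'numbers': [1, 2, 3], 'nested': {'a': {'b': 'c'}}})
#     d['numbers:0']   # -> 1
#     d['nested:a:b']  # -> 'c'
#     d.as_dict()      # -> {'numbers': [1, 2, 3], 'nested': {'a': {'b': 'c'}}}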
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of math kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
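# Orientation note (editorial, not part of the TensorFlow test suite): every test
# below builds a `loop_fn(i)` computing the i-th iteration of some computation, and
# `PForTestCase._test_loop_fn(loop_fn, iters)` is assumed to evaluate it both through
# the vectorizing pfor transformation and through an ordinary per-iteration loop,
# comparing the stacked outputs. Conceptually:
#
#     from tensorflow.python.ops.parallel_for import control_flow_ops
#     vectorized_outputs = control_flow_ops.pfor(loop_fn, iters)
#     # ...expected to equal stacking loop_fn(0), ..., loop_fn(iters - 1).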
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTestCase, parameterized.TestCase):
def _test_unary_cwise_ops(self, ops, is_complex):
for op in ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if is_complex:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
y = op(x)
x_i = array_ops.gather(x, i)
y_i = op(x_i)
outputs = [y_i]
# Build cross product of loop variant/invariant outputs and gradients.
for out in (y, y_i):
if out.dtype == dtypes.float32:
for output_gradients in (None, out * math_ops.cast(i, out.dtype)):
grad = g.gradient(out, x_i, output_gradients=output_gradients)
if grad is not None:
outputs.append(grad)
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_unary_cwise_complex_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
self._test_unary_cwise_ops(complex_ops, True)
def test_unary_cwise_real_ops_1(self):
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.bessel_i0e,
math_ops.bessel_i1e,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.erfinv,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.ndtri,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_real_ops_2(self):
real_ops = [
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
lambda t: nn.leaky_relu(t, alpha=0.1),
nn.selu,
nn.softplus,
nn.softsign,
]
self._test_unary_cwise_ops(real_ops, False)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil, math_ops.floor, math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_binary_cwise_ops(self):
logical_ops = [
math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor
]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
float_ops = [
math_ops.add,
math_ops.add_v2,
math_ops.atan2,
math_ops.complex,
math_ops.div,
math_ops.divide,
math_ops.div_no_nan,
math_ops.equal,
math_ops.floor_mod,
math_ops.greater,
math_ops.greater_equal,
math_ops.igamma,
math_ops.igammac,
math_ops.igamma_grad_a,
math_ops.less,
math_ops.less_equal,
math_ops.maximum,
math_ops.minimum,
math_ops.mod,
math_ops.multiply,
math_ops.not_equal,
math_ops.pow,
math_ops.squared_difference,
math_ops.subtract,
math_ops.truncate_mod,
safe_polygamma,
safe_zeta,
]
# FloorDiv fails on XLA due floor's discontinuities exacerbating small
# division differences.
if not test_util.is_xla_enabled():
float_ops += [math_ops.floor_div]
for op in logical_ops + float_ops:
x = random_ops.random_uniform([7, 3, 5])
y = random_ops.random_uniform([3, 5])
if op in logical_ops:
x = x > 0
y = y > 0
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend(t.dtype for t in outputs)
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3)
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
def test_cross(self):
x = random_ops.random_uniform([4, 2, 3])
y = random_ops.random_uniform([4, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
x_0 = array_ops.gather(x, 0)
return math_ops.cross(x_i, y_i), math_ops.cross(x_0, y_i)
self._test_loop_fn(loop_fn, 4)
def test_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (5, 3) if tr_a else (3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (7, 5) if tr_b else (5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul(self):
for tr_a in (True, False):
for tr_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
if stack_a:
shape_a = (2,) + shape_a
shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
if stack_b:
shape_b = (2,) + shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_batch_matmul_broadcast(self):
for broadcast_a in (True, False):
for broadcast_b in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 3, 5) if broadcast_a else (4, 2, 3, 5)
shape_b = (2, 5, 7) if broadcast_b else (4, 2, 5, 7)
shape_a = (2,) + shape_a if stack_a else shape_a
shape_b = (2,) + shape_b if stack_b else shape_b
x = random_ops.random_uniform(shape_a)
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return math_ops.matmul(a, b)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [
math_ops.reduce_sum,
math_ops.reduce_prod,
math_ops.reduce_max,
math_ops.reduce_min,
math_ops.reduce_mean,
]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_boolean_reduction(self):
x = random_ops.random_uniform([2, 3, 4, 5]) > 0.5
for op in [math_ops.reduce_any, math_ops.reduce_all]:
for axis in ([1], None, [0, 2]):
for keepdims in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, keepdims=keepdims)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_argmin_argmax(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for op in [math_ops.argmin, math_ops.argmax]:
for axis in (1, None, -1):
for output_dtype in (dtypes.int32, dtypes.int64, None):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return op(a, axis=axis, output_type=output_dtype)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bucketize(self):
x = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.bucketize(a, [-1, 0.5, 1])
self._test_loop_fn(loop_fn, 2)
def test_clip_by_value(self):
x = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
a = array_ops.gather(x, i)
return clip_ops.clip_by_value(a, 0.5, 1.0)
self._test_loop_fn(loop_fn, 2)
def test_cum_sum(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumsum(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
x = random_ops.random_uniform([2, 3, 4, 5])
for axis in (1, -2):
for exclusive in (True, False):
for reverse in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i)
return math_ops.cumprod(
a, axis=axis, exclusive=exclusive, reverse=reverse)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
for data_format in ("NCHW", "NHWC"):
for stacked_value in (True, False):
x_shape = [3, 4, 5, 6]
if stacked_value:
x_shape = [2] + x_shape
x = random_ops.random_uniform(x_shape)
for stacked_bias in (True, False):
if not (stacked_value or stacked_bias):
continue
with backprop.GradientTape(persistent=True) as g:
bias_dim = -1
if data_format == "NCHW":
bias_dim = 2 if stacked_value else 1
bias_shape = [x_shape[bias_dim]]
if stacked_bias:
bias_shape = [2] + bias_shape
bias = random_ops.random_uniform(bias_shape)
g.watch(bias)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
a = array_ops.gather(x, i) if stacked_value else x
b = array_ops.gather(bias, i) if stacked_bias else bias
y = nn.bias_add(a, b, data_format=data_format)
loss = math_ops.reduce_sum(y * y)
grad = g.gradient(loss, bias)
if stacked_bias:
# If we gather over bias in loop_fn, the gradient will be an
# instance of `IndexedSlices` with attrs `values` and `indices`.
return y, grad.values, grad.indices
else:
return y, grad
# pylint: enable=cell-var-from-loop
out_dtypes = [dtypes.float32, dtypes.float32]
if stacked_bias:
out_dtypes = out_dtypes + [dtypes.int32]
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(math_ops.unsorted_segment_sum,), (math_ops.unsorted_segment_min,),
(math_ops.unsorted_segment_max,), (math_ops.unsorted_segment_prod,))
def test_unsorted_segment_reduction(self, reduction_op):
t = random_ops.random_uniform([3, 3, 2])
for segment_ids_dtype in (dtypes.int32, dtypes.int64):
for num_segments_dtype in (dtypes.int32, dtypes.int64):
segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]],
dtype=segment_ids_dtype)
num_segments = constant_op.constant(3, dtype=num_segments_dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
data = array_ops.gather(t, i)
data_0 = array_ops.gather(t, 0)
seg_ids = array_ops.gather(segment_ids, i)
seg_ids_0 = array_ops.gather(segment_ids, 0)
return (reduction_op(data, seg_ids, num_segments),
reduction_op(data_0, seg_ids, num_segments),
reduction_op(data, seg_ids_0, num_segments))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
@parameterized.parameters((math_ops.sparse_segment_sum_v2, True),
(math_ops.sparse_segment_mean_v2, True),
(math_ops.sparse_segment_sqrt_n_v2, True),
(math_ops.sparse_segment_sum_v2, False),
(math_ops.sparse_segment_mean_v2, False),
(math_ops.sparse_segment_sqrt_n_v2, False))
def test_sparse_segment(self, op_func, with_num_segments):
data = random_ops.random_uniform([3, 4, 2])
indices = constant_op.constant([[1, 2, 3], [0, 1, 2], [0, 2, 3]])
seg_ids = constant_op.constant([[0, 0, 2], [1, 1, 1], [0, 1, 1]])
if with_num_segments:
num_segments = 3
else:
num_segments = None
def loop_fn(i):
data_i = array_ops.gather(data, i)
data_0 = array_ops.gather(data, 0)
indices_i = array_ops.gather(indices, i)
indices_0 = array_ops.gather(indices, 0)
seg_ids_i = array_ops.gather(seg_ids, i)
seg_ids_0 = array_ops.gather(seg_ids, 0)
outputs = [
op_func(data_0, indices_i, seg_ids_0, num_segments=num_segments),
op_func(data_i, indices_i, seg_ids_0, num_segments=num_segments),
op_func(data_0, indices_0, seg_ids_0, num_segments=num_segments),
op_func(data_i, indices_0, seg_ids_0, num_segments=num_segments)
]
if with_num_segments:
# For this case, we support loop variant segment_ids as well.
outputs += [
op_func(data_0, indices_i, seg_ids_i, num_segments=num_segments),
op_func(data_i, indices_i, seg_ids_i, num_segments=num_segments),
op_func(data_0, indices_0, seg_ids_i, num_segments=num_segments),
op_func(data_i, indices_0, seg_ids_i, num_segments=num_segments)
]
return outputs
self._test_loop_fn(loop_fn, 3)
@parameterized.parameters(math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad)
def test_sparse_segment_grad(self, op_func):
grad = random_ops.random_uniform([3, 3, 2])
indices = constant_op.constant([1, 2, 3])
seg_ids = constant_op.constant([0, 0, 2])
dim0 = 4
def loop_fn(i):
grad_i = array_ops.gather(grad, i)
return op_func(grad_i, indices, seg_ids, dim0)
self._test_loop_fn(loop_fn, 3)
def test_cast(self):
x = constant_op.constant([[1], [2]])
y = constant_op.constant([[1.0], [2.0]])
def loop_fn(i):
return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
math_ops.cast(array_ops.gather(y, i), dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_tanh_axpy(self):
a = constant_op.constant(3.)
x = random_ops.random_uniform([4, 5])
y = random_ops.random_uniform([6, 5])
n = x.shape[0]
def loop_fn(i):
return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))
self._test_loop_fn(loop_fn, n)
def test_select(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
for cond_shape in [2], [2, 3], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_needs_broadcast(self):
a = random_ops.random_uniform([2, 3, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_args_need_broadcast(self):
a = random_ops.random_uniform([2, 5])
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for cond_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
cond = random_ops.random_uniform(cond_shape) > 0.5
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
cond_i = array_ops.gather(cond, i)
return array_ops.where_v2(cond_i, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_selectv2_cond_fixed(self):
cond = random_ops.random_uniform([3, 5]) > 0.5
b = random_ops.random_uniform([2, 3, 5])
# wherev2 assumes all shapes are broadcastable with each other.
# This means that we can only specify conditions that are
# broadcastable with [3, 5].
for a_shape in [2], [2, 1], [2, 5], [2, 3, 1], [2, 3, 5]:
a = random_ops.random_uniform(a_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a_i = array_ops.gather(a, i)
b_i = array_ops.gather(b, i)
return array_ops.where_v2(cond, a_i, b_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class LinalgTest(PForTestCase):
def test_cholesky(self):
z = random_ops.random_normal([2, 3, 3])
x = (
math_ops.matmul(z, array_ops.matrix_transpose(z)) # Ensure pos. def.
+ linalg_ops.eye(3)) # Ensure well-conditioned.
def loop_fn(i):
return linalg_ops.cholesky(array_ops.gather(x, i))
self._test_loop_fn(loop_fn, 2)
def test_log_matrix_determinant(self):
for x_shape in ([3, 4, 2, 2], [3, 2, 2]):
x = random_ops.random_normal(x_shape)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.log_matrix_determinant(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_inverse(self):
x = (random_ops.random_uniform([3, 4, 2, 2]) +
10 * linalg_ops.eye(2)) # Ensure well-conditioned.
for adjoint in (True, False):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return linalg_ops.matrix_inverse(array_ops.gather(x, i),
adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_solve(self):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = (random_ops.random_uniform(shape_a) +
10 * linalg_ops.eye(3)) # Ensure well-conditioned.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_solve(a, b, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_matrix_triangular_solve(self):
for lower in (True, False):
for adjoint in (True, False):
for stack_a in (True, False):
for stack_b in (True, False):
shape_a = (2, 4, 3, 3) if stack_a else (4, 3, 3)
shape_b = (2, 4, 3, 5) if stack_b else (4, 3, 5)
x = array_ops.matrix_band_part(
random_ops.random_uniform(shape_a) +
linalg_ops.eye(3), # Ensure well-conditioned.
*((-1, 0) if lower else (0, -1))) # Ensure triangular.
y = random_ops.random_uniform(shape_b)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
a = array_ops.gather(x, i) if stack_a else x
b = array_ops.gather(y, i) if stack_b else y
return linalg_ops.matrix_triangular_solve(
a, b, lower=lower, adjoint=adjoint)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
def test_self_adjoint_eig(self):
z = random_ops.random_normal([2, 3, 3])
x = z + array_ops.matrix_transpose(z) # Ensure self-adjoint.
def loop_fn(i):
return (linalg_ops.self_adjoint_eig(array_ops.gather(x, i)),
linalg_ops.self_adjoint_eigvals(array_ops.gather(x, i)))
self._test_loop_fn(loop_fn, 2)
def test_einsum(self):
b = 10
x_series = random_ops.random_uniform([b, 9, 9])
y_series = random_ops.random_uniform([b, 9, 1])
def loop_fn(i):
x = array_ops.gather(x_series, 0) # invariant.
y = array_ops.gather(y_series, 0) # invariant.
x_i = array_ops.gather(x_series, i)
y_i = array_ops.gather(y_series, i)
z1 = special_math_ops.einsum("ab,bc->ac", x_i, y)
z2 = special_math_ops.einsum("ab,bc->ac", x, y_i)
z3 = special_math_ops.einsum("ab,bc->ac", x, y)
z4 = special_math_ops.einsum("ab,bc->ac", x_i, y_i)
z5 = special_math_ops.einsum("cd,ce->de", y_i, x_i) # Includes transpose.
outputs = [z1, z2, z3, z4, z5]
return outputs
self._test_loop_fn(loop_fn, b)
if __name__ == "__main__":
test.main()
|
|
from collections import namedtuple
from functools import wraps
import attr
from fuzzysearch.common import FuzzySearchBase, Match, \
consolidate_overlapping_matches
from fuzzysearch.compat import xrange
from fuzzysearch.search_exact import search_exact
__all__ = [
'find_near_matches_generic',
'find_near_matches_generic_linear_programming',
'find_near_matches_generic_ngrams',
'has_near_match_generic_ngrams',
]
GenericSearchCandidate = namedtuple(
'GenericSearchCandidate',
['start', 'subseq_index', 'l_dist', 'n_subs', 'n_ins', 'n_dels'],
)
def find_near_matches_generic(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * the maximum allowed number of character deletions
    * the maximum allowed total number of substitutions, insertions and deletions
"""
if not subsequence:
raise ValueError('Given subsequence is empty!')
# if the limitations are so strict that only exact matches are allowed,
# use search_exact()
if search_params.max_l_dist == 0:
return [
Match(start_index, start_index + len(subsequence), 0,
matched=sequence[start_index:start_index + len(subsequence)])
for start_index in search_exact(subsequence, sequence)
]
# if the n-gram length would be at least 3, use the n-gram search method
elif len(subsequence) // (search_params.max_l_dist + 1) >= 3:
return find_near_matches_generic_ngrams(subsequence, sequence, search_params)
# use the linear programming search method
else:
return find_near_matches_generic_linear_programming(subsequence, sequence, search_params)
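# Usage sketch (editorial, not part of fuzzysearch): `search_params` is assumed to be
# an object exposing max_substitutions, max_insertions, max_deletions and max_l_dist,
# plus an `unpacked` 4-tuple of those values, as consumed by the functions in this
# module. With such an object allowing one edit, a call like
#
#     find_near_matches_generic('PATTERN', 'xxPATERNxx', search_params)
#
# produces Match(start, end, dist, matched) objects for the nearly-matching spans,
# e.g. one covering 'PATERN' with dist == 1 (a single deleted character).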
def _find_near_matches_generic_linear_programming(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * the maximum allowed number of character deletions
    * the maximum allowed total number of substitutions, insertions and deletions
"""
if not subsequence:
raise ValueError('Given subsequence is empty!')
max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked
# optimization: prepare some often used things in advance
subseq_len = len(subsequence)
def make_match(start, end, dist):
return Match(start, end, dist, matched=sequence[start:end])
candidates = []
for index, char in enumerate(sequence):
candidates.append(GenericSearchCandidate(index, 0, 0, 0, 0, 0))
new_candidates = []
for cand in candidates:
# if this sequence char is the candidate's next expected char
if char == subsequence[cand.subseq_index]:
# if reached the end of the subsequence, return a match
if cand.subseq_index + 1 == subseq_len:
yield make_match(cand.start, index + 1, cand.l_dist)
# otherwise, update the candidate's subseq_index and keep it
else:
new_candidates.append(cand._replace(
subseq_index=cand.subseq_index + 1,
))
# if this sequence char is *not* the candidate's next expected char
else:
# we can try skipping a sequence or sub-sequence char (or both),
# unless this candidate has already skipped the maximum allowed
# number of characters
if cand.l_dist == max_l_dist:
continue
if cand.n_ins < max_insertions:
# add a candidate skipping a sequence char
new_candidates.append(cand._replace(
n_ins=cand.n_ins + 1,
l_dist=cand.l_dist + 1,
))
if cand.subseq_index + 1 < subseq_len:
if cand.n_subs < max_substitutions:
# add a candidate skipping both a sequence char and a
# subsequence char
new_candidates.append(cand._replace(
n_subs=cand.n_subs + 1,
subseq_index=cand.subseq_index + 1,
l_dist=cand.l_dist + 1,
))
elif cand.n_dels < max_deletions and cand.n_ins < max_insertions:
# add a candidate skipping both a sequence char and a
# subsequence char
new_candidates.append(cand._replace(
n_ins=cand.n_ins + 1,
n_dels=cand.n_dels + 1,
subseq_index=cand.subseq_index + 1,
l_dist=cand.l_dist + 1,
))
else:
                    # cand.subseq_index == subseq_len - 1
if (
cand.n_subs < max_substitutions or
(
cand.n_dels < max_deletions and
cand.n_ins < max_insertions
)
):
yield make_match(cand.start, index + 1, cand.l_dist + 1)
# try skipping subsequence chars
for n_skipped in xrange(1, min(max_deletions - cand.n_dels, max_l_dist - cand.l_dist) + 1):
# if skipping n_dels sub-sequence chars reaches the end
# of the sub-sequence, yield a match
if cand.subseq_index + n_skipped == subseq_len:
yield make_match(cand.start, index,
cand.l_dist + n_skipped)
break
# otherwise, if skipping n_skipped sub-sequence chars
# reaches a sub-sequence char identical to this sequence
# char ...
elif subsequence[cand.subseq_index + n_skipped] == char:
# if this is the last char of the sub-sequence, yield
# a match
if cand.subseq_index + n_skipped + 1 == subseq_len:
yield make_match(cand.start, index,
cand.l_dist + n_skipped)
# otherwise add a candidate skipping n_skipped
# subsequence chars
else:
new_candidates.append(cand._replace(
n_dels=cand.n_dels + n_skipped,
subseq_index=cand.subseq_index + 1 + n_skipped,
l_dist=cand.l_dist + n_skipped,
))
break
# note: if the above loop ends without a break, that means that
# no candidate could be added / yielded by skipping sub-sequence
# chars
candidates = new_candidates
for cand in candidates:
# note: index + 1 == length(sequence)
n_skipped = subseq_len - cand.subseq_index
if cand.n_dels + n_skipped <= max_deletions and \
cand.l_dist + n_skipped <= max_l_dist:
yield make_match(cand.start, index + 1, cand.l_dist + n_skipped)
try:
from fuzzysearch._generic_search import \
c_find_near_matches_generic_linear_programming as c_fnm_generic_lp
except ImportError:
find_near_matches_generic_linear_programming = \
_find_near_matches_generic_linear_programming
else:
@wraps(_find_near_matches_generic_linear_programming)
def find_near_matches_generic_linear_programming(subsequence, sequence, search_params):
try:
for match in c_fnm_generic_lp(subsequence, sequence, search_params):
yield match
except (TypeError, UnicodeEncodeError):
for match in _find_near_matches_generic_linear_programming(
subsequence, sequence, search_params):
yield match
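# Editorial note on the n-gram strategy below: the subsequence is split into
# max_l_dist + 1 non-overlapping n-grams, so by the pigeonhole principle any match
# within max_l_dist edits must contain at least one of those n-grams verbatim.
# Each exact n-gram hit found with search_exact() is then expanded into a small
# window of the sequence and verified with the linear-programming search above.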
def find_near_matches_generic_ngrams(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * the maximum allowed number of character deletions
    * the maximum allowed total number of substitutions, insertions and deletions
"""
if not subsequence:
raise ValueError('Given subsequence is empty!')
max_l_dist = search_params.max_l_dist
# optimization: prepare some often used things in advance
subseq_len = len(subsequence)
seq_len = len(sequence)
ngram_len = subseq_len // (max_l_dist + 1)
if ngram_len == 0:
raise ValueError('the subsequence length must be greater than max_l_dist')
for ngram_start in xrange(0, subseq_len - ngram_len + 1, ngram_len):
ngram_end = ngram_start + ngram_len
start_index = max(0, ngram_start - max_l_dist)
end_index = min(seq_len, seq_len - subseq_len + ngram_end + max_l_dist)
for index in search_exact(subsequence[ngram_start:ngram_end], sequence, start_index, end_index):
            # try to expand left and/or right around the exact n-gram hit
for match in find_near_matches_generic_linear_programming(
subsequence, sequence[max(0, index - ngram_start - max_l_dist):index - ngram_start + subseq_len + max_l_dist],
search_params,
):
yield attr.evolve(match,
start=match.start + max(0, index - ngram_start - max_l_dist),
end=match.end + max(0, index - ngram_start - max_l_dist),
)
def has_near_match_generic_ngrams(subsequence, sequence, search_params):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
    * the maximum allowed number of character substitutions
    * the maximum allowed number of new characters inserted
    * the maximum allowed number of character deletions
    * the maximum allowed total number of substitutions, insertions and deletions
"""
for match in find_near_matches_generic_ngrams(subsequence, sequence, search_params):
return True
return False
class GenericSearch(FuzzySearchBase):
@classmethod
def search(cls, subsequence, sequence, search_params):
for match in find_near_matches_generic(subsequence, sequence,
search_params):
yield match
@classmethod
def consolidate_matches(cls, matches):
return consolidate_overlapping_matches(matches)
@classmethod
def extra_items_for_chunked_search(cls, subsequence, search_params):
return max(
x for x in [search_params.max_l_dist,
search_params.max_insertions]
if x is not None
)
|
|
#!/usr/bin/python
#
# texture_atlas_builder.py
# CSPong
# Created by Scott Downie on 30/06/2014.
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Tag Games Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys
import os
import subprocess
import shutil
import random
import file_system_utils
relative_tool_path = "../../ChilliSource/Tools/CSAtlasBuilder.jar"
temp_dir_prefix = "_temp-textureatlasbuilder-"
#------------------------------------------------------------------------------
# Walks the input directory and packs all pngs in each folder onto an atlas
# for that folder
#
# @author S Downie
#
# @param Input path
# @param Output path
#------------------------------------------------------------------------------
def build(input_path, output_path):
print("-----------------------------------------")
print(" Building atlases")
print("-----------------------------------------")
    if not input_path.endswith("/"):
        input_path = input_path + "/"
    if not output_path.endswith("/"):
        output_path = output_path + "/"
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
for directory, sub_dirs, file_names in os.walk(input_path):
        output_dir = os.path.join(output_path, directory[len(input_path):])
if len(sub_dirs) == 0:
contains_png = False
            for file_name in file_names:
                if file_system_utils.has_extension(file_name, ".png"):
                    contains_png = True
                    break
            if contains_png:
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                build_atlases_in_directory(directory, output_dir, file_names)
print(" ")
#------------------------------------------------------------------------------
# @author Ian Copland
#
# @param A file name.
#
# @return The tags in the file name, or an empty string if there isn't one.
#------------------------------------------------------------------------------
def get_tags_from_file_name(file_name):
if file_name.count('.') > 1:
first = file_name.find(".")
last = file_name.rfind(".")
tags = file_name[first + 1 : last]
return tags.lower()
else:
return ""
#------------------------------------------------------------------------------
# @author Ian Copland
#
# @param A file name.
# @param The tags string
#
# @return Whether or not the file name has the given tags.
#------------------------------------------------------------------------------
def file_name_has_tags(file_name, tags):
if get_tags_from_file_name(file_name) == tags.lower():
return True
return False
#------------------------------------------------------------------------------
# @author Ian Copland
#
# @param A file name.
#
# @return The file name with any tags removed.
#------------------------------------------------------------------------------
def remove_tags_from_file_name(file_name):
if file_name.count('.') > 1:
first = file_name.find(".")
last = file_name.rfind(".")
tagless_file_name = file_name[0 : first] + file_name[last : len(file_name)]
return tagless_file_name
else:
return file_name
#------------------------------------------------------------------------------
# @author Ian Copland
#
# @param A file path.
# @param The tags string.
#
# @return The file path with the tags added.
#------------------------------------------------------------------------------
def add_tags_to_file_path(file_path, tags):
if len(tags) > 0:
period_index = file_path.rfind(".")
tagged_file_path = file_path[0 : period_index] + "." + tags + file_path[period_index : len(file_path)]
return tagged_file_path
else:
return file_path
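# Worked examples for the tag helpers above (derived from the code; comments only):
#   get_tags_from_file_name("Image.high.png")      -> "high"
#   remove_tags_from_file_name("Image.high.png")   -> "Image.png"
#   add_tags_to_file_path("atlas.csatlas", "high") -> "atlas.high.csatlas"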
#------------------------------------------------------------------------------
# @author Ian Copland
#
# @param The path to an atlas.
# @param The tags.
#
# @return The output atlas file path with tags.
#------------------------------------------------------------------------------
def generate_atlas_file_path(output_dir, tags):
    atlas_file_path = os.path.join(output_dir, os.path.basename(output_dir) + ".csatlas")
return add_tags_to_file_path(atlas_file_path, tags)
#------------------------------------------------------------------------------
# Builds each of the different atlases in the given directory. Different atlases
# are built for assets with different "resource tags", i.e. Image.low.png and
# Image.high.png would end up on different texture atlases.
#
# @author Ian Copland
#
# @param Input directory
# @param Output directory
# @param The file names in the input directory
#------------------------------------------------------------------------------
def build_atlases_in_directory(input_dir, output_dir, file_names):
remains = [file_name for file_name in file_names if file_system_utils.has_extension(file_name, ".png")]
while len(remains) > 0:
tags = get_tags_from_file_name(remains[0])
tagged_files = [file_name for file_name in remains if file_name_has_tags(file_name, tags)]
        build_atlas_with_tags(input_dir, output_dir, tagged_files, tags)
        for file_name in tagged_files:
            remains.remove(file_name)
#------------------------------------------------------------------------------
# Builds the given files with the given tags to a texture atlas.
#
# @author Ian Copland
#
# @param The input directory.
# @param The output directory.
# @param The file names to build.
# @param The tags.
#------------------------------------------------------------------------------
def build_atlas_with_tags(input_dir, output_dir, file_names, tags):
temp_dir = temp_dir_prefix + str(random.randint(0, 2147483647))
    os.mkdir(temp_dir)
for file_name in file_names:
source_file_name = os.path.join(input_dir, file_name)
dest_file_name = os.path.join(temp_dir, remove_tags_from_file_name(file_name))
shutil.copy2(source_file_name, dest_file_name)
output_file_path = generate_atlas_file_path(output_dir, tags)
build_texture_atlas(temp_dir, output_file_path, tags)
shutil.rmtree(temp_dir)
#------------------------------------------------------------------------------
# Builds a single atlas from the pngs in the given directory.
#
# @author S Downie
#
# @param Input directory path
# @param Output file path
# @param The tags.
#------------------------------------------------------------------------------
def build_texture_atlas(input_file_path, output_file_path, tags):
print(output_file_path)
    max_size = 2048
    if "high" in tags.lower():
        max_size = 4096
tool_path = file_system_utils.get_path_from_here(relative_tool_path)
subprocess.call(["java", "-Djava.awt.headless=true", "-Xmx512m", "-jar", tool_path, "--input", input_file_path, "--output", output_file_path, "--maxwidth", str(max_size), "--maxheight", str(max_size), "--padding", "2"]);
#------------------------------------------------------------------------------
# The entry point into the script.
#
# @author S Downie
#
# @param The list of arguments.
#------------------------------------------------------------------------------
def main(args):
    if len(args) != 3:
        print("ERROR: Incorrect parameters supplied. Usage: texture_atlas_builder.py <input path> <output path>")
return
input_path = args[1]
output_path = args[2]
build(input_path, output_path)
if __name__ == "__main__":
main(sys.argv)
|
|
from sita.core.api.viewsets import GenericViewSet
from rest_framework import status
from sita.api.v1.routers import router
from sita.core.api.viewsets.nested import NestedViewset
from rest_framework.permissions import IsAuthenticated
from .serializers import AppointmentSerializer, AppointmentSerializerModel, AppointmentListSerializer
from sita.users.api import UserViewSet
from sita.appointments.models import Appointment
from sita.patients.models import Patient
from sita.users.models import User
from rest_framework.response import Response
from sita.utils.refresh_token import has_permission
from sita.core.api.mixins import base as base_mixins
from django.contrib.auth import get_user_model
from sita.utils.urlresolvers import get_query_params
from rest_framework.decorators import detail_route
from sita.utils.appointmentQuery import construct_query_view_month
from datetime import datetime
from calendar import monthrange
class AppointmentViewSet(
base_mixins.ListModelMixin,
GenericViewSet):
serializer_class = AppointmentSerializerModel
retrieve_serializer_class = AppointmentSerializerModel
partial_update_serializer_class = AppointmentSerializerModel
update_serializer_class = AppointmentSerializerModel
create_serializer_class = AppointmentSerializerModel
permission_classes = (IsAuthenticated, )
def get_queryset(self, user_id=None, patient_id=None, *args, **kwargs):
queryset = Appointment.objects.filter(user_id=user_id, patient_id=patient_id)
        queryset = queryset.extra(where=["date_appointment >= '{0}'".format(datetime.now())])
return queryset
def create(self, request, user_pk=None, patient_pk=None, *args, **kwargs):
"""
        Create an appointment for a patient
---
omit_parameters:
- form
parameters:
- name: body
pytype: AppointmentSerializer
paramType: body
description:
'name: <b>required</b> <br>
email: <b>required</b> <br>
mobilePhone: <b>required</b> <br>
lastName:NOT required <br>
mothersName: NOT required <br>
age: NOT required <br>
housePhone: NOT required'
- name: Authorization
description: Bearer {token}.
required: true
type: string
paramType: header
responseMessages:
- code: 201
message: CREATED
- code: 404
message: NOT FOUND
- code: 401
message: UNAUTHORIZED
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
if User.objects.exists_user(pk=user_pk):
user = User.objects.get(id=user_pk)
if Patient.objects.exists(pk=patient_pk):
patient = Patient.objects.get(pk=patient_pk)
if patient.is_active and patient.user_id==user.id:
if has_permission(request.META, user):
serializer = AppointmentSerializer(data=request.data)
if serializer.is_valid():
fields = Appointment().get_fields()
appointment = Appointment.objects.register(
data=request.data, fields=fields, user=user, patient=patient)
if appointment:
return Response(status=status.HTTP_201_CREATED)
else:
return Response({"message": "Exist a date in the same time"},status=422)
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_401_UNAUTHORIZED)
return Response(status=status.HTTP_404_NOT_FOUND)
def list(self, request, user_pk=None, patient_pk=None, *args, **kwards):
"""
        List upcoming appointments for a patient
---
omit_parameters:
- form
parameters:
- name: Authorization
description: Bearer {token}.
required: true
type: string
paramType: header
- name: q
description: Search word.
paramType: query
type: string
responseMessages:
- code: 200
message: OK
- code: 404
message: NOT FOUND
- code: 401
message: UNAUTHORIZED
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
if User.objects.exists_user(pk=user_pk):
user = User.objects.get(id=user_pk)
if Patient.objects.exists(pk=patient_pk):
patient = Patient.objects.get(pk=patient_pk)
if patient.is_active and patient.user_id==user.id:
if has_permission(request.META, user):
return super(
AppointmentViewSet, self).list(
request,
queryset=self.get_queryset(user.id, patient.id),
*args,
**kwards )
return Response(status=status.HTTP_401_UNAUTHORIZED)
return Response(status=status.HTTP_404_NOT_FOUND)
class AppointmentListViewSet(
base_mixins.ListModelMixin,
GenericViewSet):
serializer_class = AppointmentSerializerModel
retrieve_serializer_class = AppointmentSerializerModel
partial_update_serializer_class = AppointmentSerializerModel
update_serializer_class = AppointmentSerializerModel
create_serializer_class = AppointmentSerializerModel
permission_classes = (IsAuthenticated, )
def get_queryset(self, user_id, date_init, date_end, *args, **kwargs):
queryset = Appointment.objects.extra(where=["date_appointment >= '{0}'".format(date_init)])
queryset = queryset.extra(where=["date_appointment <= '{0}'".format(date_end)])
queryset = queryset.order_by("date_appointment")
queryset = queryset.filter(user_id=user_id)
return queryset
def list(self, request, user_pk=None, *args, **kwargs):
"""
        List a user's appointments within a date range
---
omit_parameters:
- form
parameters:
- name: Authorization
description: Bearer {token}.
required: true
type: string
paramType: header
- name: date_init
description: Search word.
paramType: query
type: datetime
required: true
- name: date_end
description: Search word.
paramType: query
type: datetime
required: true
responseMessages:
- code: 200
message: OK
- code: 404
message: NOT FOUND
- code: 401
message: UNAUTHORIZED
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
if User.objects.exists_user(pk=user_pk):
user = User.objects.get(id=user_pk)
if has_permission(request.META, user):
query_params = get_query_params(request)
try:
date_init = datetime.strptime(query_params.get("date_init"), "%Y-%m-%dT%H:%M:%S")
except ValueError:
return Response({"date_init": "Is not valid date"},status=status.HTTP_400_BAD_REQUEST)
try:
date_end = datetime.strptime(query_params.get("date_end"), "%Y-%m-%dT%H:%M:%S")
except ValueError:
return Response({"date_end": "Is not valid date"},status=status.HTTP_400_BAD_REQUEST)
serializer = AppointmentListSerializer()
query = self.get_queryset(user.id, date_init, date_end)
data = serializer.serialize(
query,
fields=("subject", "date_appointment", "user", "patient", "duration_hours", "time_zone"))
# return super(
# AppointmentListViewSet, self).list(
# request,
# queryset=self.get_queryset(user.id, date_init, date_end),
# *args,
# **kwargs )
return Response({"data":data},status=status.HTTP_200_OK)
return Response(status=status.HTTP_401_UNAUTHORIZED)
return Response(status=status.HTTP_404_NOT_FOUND)
router.register_nested(
r'patients',
r'appointments',
AppointmentViewSet,
parent_lookup_name='patient',
base_name='appointment',
depth_level=2
)
router.register_nested(
r'users',
r'appointments',
AppointmentListViewSet,
parent_lookup_name='user',
base_name='appointments'
)
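# For reference (an assumption inferred from the nested registrations above, not from
# the project's URL configuration): the viewsets are expected to be exposed under
# routes shaped roughly like
#   /users/{user_pk}/patients/{patient_pk}/appointments/  -> AppointmentViewSet
#   /users/{user_pk}/appointments/                        -> AppointmentListViewSet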
|
|
from __future__ import unicode_literals
import functools
import sys
import tablib
import traceback
from copy import deepcopy
from diff_match_patch import diff_match_patch
from django import VERSION
from django.conf import settings
from django.core.management.color import no_style
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query import QuerySet
from django.db.transaction import TransactionManagementError
from django.utils import six
from django.utils.safestring import mark_safe
from . import widgets
from .fields import Field
from .instance_loaders import ModelInstanceLoader
from .results import Error, Result, RowResult
try:
from django.db.transaction import atomic, savepoint, savepoint_rollback, savepoint_commit # noqa
except ImportError:
from .django_compat import atomic, savepoint, savepoint_rollback, savepoint_commit # noqa
if VERSION < (1, 8):
from django.db.models.related import RelatedObject
ForeignObjectRel = RelatedObject
else:
from django.contrib.postgres.fields import ArrayField
from django.db.models.fields.related import ForeignObjectRel
RelatedObject = None
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
# Set default logging handler to avoid "No handler found" warnings.
import logging # isort:skip
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
USE_TRANSACTIONS = getattr(settings, 'IMPORT_EXPORT_USE_TRANSACTIONS', False)
class ResourceOptions(object):
"""
The inner Meta class allows for class-level configuration of how the
Resource should behave. The following options are available:
"""
model = None
"""
Django Model class. It is used to introspect available
fields.
"""
fields = None
"""
Controls what introspected fields the Resource should include. A whitelist
of fields.
"""
exclude = None
"""
Controls what introspected fields the Resource should
NOT include. A blacklist of fields.
"""
instance_loader_class = None
"""
Controls which class instance will take
care of loading existing objects.
"""
import_id_fields = ['id']
"""
Controls which object fields will be used to
identify existing instances.
"""
export_order = None
"""
Controls export order for columns.
"""
widgets = None
"""
This dictionary defines widget kwargs for fields.
"""
use_transactions = None
"""
Controls if import should use database transactions. Default value is
``None`` meaning ``settings.IMPORT_EXPORT_USE_TRANSACTIONS`` will be
evaluated.
"""
skip_unchanged = False
"""
Controls if the import should skip unchanged records. Default value is
False
"""
report_skipped = True
"""
    Controls if the result reports skipped rows. Default value is True.
"""
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
declared_fields = []
meta = ResourceOptions()
# If this class is subclassing another Resource, add that Resource's
# fields. Note that we loop over the bases in *reverse*. This is
# necessary in order to preserve the correct order of fields.
for base in bases[::-1]:
if hasattr(base, 'fields'):
declared_fields = list(six.iteritems(base.fields)) + declared_fields
# Collect the Meta options
options = getattr(base, 'Meta', None)
for option in [option for option in dir(options)
if not option.startswith('_')]:
setattr(meta, option, getattr(options, option))
# Add direct fields
for field_name, obj in attrs.copy().items():
if isinstance(obj, Field):
field = attrs.pop(field_name)
if not field.column_name:
field.column_name = field_name
declared_fields.append((field_name, field))
attrs['fields'] = OrderedDict(declared_fields)
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name,
bases, attrs)
# Add direct options
options = getattr(new_class, 'Meta', None)
for option in [option for option in dir(options)
if not option.startswith('_')]:
setattr(meta, option, getattr(options, option))
new_class._meta = meta
return new_class
class Resource(six.with_metaclass(DeclarativeMetaclass)):
"""
Resource defines how objects are mapped to their import and export
representations and handle importing and exporting data.
"""
@classmethod
def get_result_class(self):
"""
Returns the class used to store the result of an import.
"""
return Result
@classmethod
def get_row_result_class(self):
"""
Returns the class used to store the result of a row import.
"""
return RowResult
@classmethod
def get_error_result_class(self):
"""
Returns the class used to store an error resulting from an import.
"""
return Error
def get_use_transactions(self):
if self._meta.use_transactions is None:
return USE_TRANSACTIONS
else:
return self._meta.use_transactions
def get_fields(self):
"""
Returns fields sorted according to
:attr:`~import_export.resources.ResourceOptions.export_order`.
"""
return [self.fields[f] for f in self.get_export_order()]
@classmethod
def get_field_name(cls, field):
"""
Returns the field name for a given field.
"""
for field_name, f in cls.fields.items():
if f == field:
return field_name
raise AttributeError("Field %s does not exists in %s resource" % (
field, cls))
def init_instance(self, row=None):
raise NotImplementedError()
def get_instance(self, instance_loader, row):
"""
Calls the :doc:`InstanceLoader <api_instance_loaders>`.
"""
return instance_loader.get_instance(row)
def get_or_init_instance(self, instance_loader, row):
"""
Either fetches an already existing instance or initializes a new one.
"""
instance = self.get_instance(instance_loader, row)
if instance:
return (instance, False)
else:
return (self.init_instance(row), True)
def save_instance(self, instance, dry_run=False):
"""
Takes care of saving the object to the database.
Keep in mind that this is done by calling ``instance.save()``, so
objects are not created in bulk!
"""
self.before_save_instance(instance, dry_run)
if not dry_run:
instance.save()
self.after_save_instance(instance, dry_run)
def before_save_instance(self, instance, dry_run):
"""
Override to add additional logic. Does nothing by default.
"""
pass
def after_save_instance(self, instance, dry_run):
"""
Override to add additional logic. Does nothing by default.
"""
pass
def delete_instance(self, instance, dry_run=False):
"""
Calls :meth:`instance.delete` as long as ``dry_run`` is not set.
"""
self.before_delete_instance(instance, dry_run)
if not dry_run:
instance.delete()
self.after_delete_instance(instance, dry_run)
def before_delete_instance(self, instance, dry_run):
"""
Override to add additional logic. Does nothing by default.
"""
pass
def after_delete_instance(self, instance, dry_run):
"""
Override to add additional logic. Does nothing by default.
"""
pass
def import_field(self, field, obj, data):
"""
Calls :meth:`import_export.fields.Field.save` if ``Field.attribute``
and ``Field.column_name`` are found in ``data``.
"""
if field.attribute and field.column_name in data:
field.save(obj, data)
def import_obj(self, obj, data, dry_run):
"""
Traverses every field in this Resource and calls
:meth:`~import_export.resources.Resource.import_field`.
"""
for field in self.get_fields():
if isinstance(field.widget, widgets.ManyToManyWidget):
continue
self.import_field(field, obj, data)
def save_m2m(self, obj, data, dry_run):
"""
Saves m2m fields.
        Model instances need to have a primary key value before
a many-to-many relationship can be used.
"""
if not dry_run:
for field in self.get_fields():
if not isinstance(field.widget, widgets.ManyToManyWidget):
continue
self.import_field(field, obj, data)
def for_delete(self, row, instance):
"""
Returns ``True`` if ``row`` importing should delete instance.
Default implementation returns ``False``.
Override this method to handle deletion.
"""
return False
def skip_row(self, instance, original):
"""
Returns ``True`` if ``row`` importing should be skipped.
Default implementation returns ``False`` unless skip_unchanged == True.
Override this method to handle skipping rows meeting certain
conditions.
"""
if not self._meta.skip_unchanged:
return False
for field in self.get_fields():
try:
# For fields that are models.fields.related.ManyRelatedManager
# we need to compare the results
if list(field.get_value(instance).all()) != list(field.get_value(original).all()):
return False
except AttributeError:
if field.get_value(instance) != field.get_value(original):
return False
return True
def get_diff(self, original, current, dry_run=False):
"""
Get diff between original and current object when ``import_data``
is run.
``dry_run`` allows handling special cases when object is not saved
to database (ie. m2m relationships).
"""
data = []
dmp = diff_match_patch()
for field in self.get_fields():
v1 = self.export_field(field, original) if original else ""
v2 = self.export_field(field, current) if current else ""
diff = dmp.diff_main(force_text(v1), force_text(v2))
dmp.diff_cleanupSemantic(diff)
html = dmp.diff_prettyHtml(diff)
html = mark_safe(html)
data.append(html)
return data
def get_diff_headers(self):
"""
Diff representation headers.
"""
return self.get_export_headers()
def before_import(self, dataset, dry_run, **kwargs):
"""
Override to add additional logic. Does nothing by default.
This method receives the ``dataset`` that's going to be imported, the
``dry_run`` parameter which determines whether changes are saved to
the database, and any additional keyword arguments passed to
``import_data`` in a ``kwargs`` dict.
"""
return dataset
def after_import(self, dataset, result, dry_run, **kwargs):
"""
Override to add additional logic. Does nothing by default.
This method receives the ``dataset`` that's just been imported, the
``result`` of the import and the ``dry_run`` parameter which determines
whether changes will be saved to the database, and any additional
keyword arguments passed to ``import_data`` in a ``kwargs`` dict. This
method runs after the main import finishes but before the changes are
committed or rolled back.
"""
pass
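    # Editorial summary of the per-row flow implemented below: load or initialise the
    # instance (get_or_init_instance), choose between delete and import
    # (for_delete / import_obj), optionally skip unchanged rows (skip_row), then save
    # the instance and its m2m data (save_instance, save_m2m) and record a diff on
    # the RowResult.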
def import_row(self, row, instance_loader, dry_run=False, **kwargs):
"""
Imports data from ``tablib.Dataset``. Refer to :doc:`import_workflow`
for a more complete description of the whole import process.
:param row: A ``dict`` of the row to import
:param instance_loader: The instance loader to be used to load the row
:param dry_run: If ``dry_run`` is set, or an error occurs, the transaction
will be rolled back.
"""
try:
row_result = self.get_row_result_class()()
instance, new = self.get_or_init_instance(instance_loader, row)
if new:
row_result.import_type = RowResult.IMPORT_TYPE_NEW
else:
row_result.import_type = RowResult.IMPORT_TYPE_UPDATE
row_result.new_record = new
row_result.object_repr = force_text(instance)
row_result.object_id = instance.pk
original = deepcopy(instance)
if self.for_delete(row, instance):
if new:
row_result.import_type = RowResult.IMPORT_TYPE_SKIP
row_result.diff = self.get_diff(None, None, dry_run)
else:
row_result.import_type = RowResult.IMPORT_TYPE_DELETE
self.delete_instance(instance, dry_run)
row_result.diff = self.get_diff(original, None, dry_run)
else:
self.import_obj(instance, row, dry_run)
if self.skip_row(instance, original):
row_result.import_type = RowResult.IMPORT_TYPE_SKIP
else:
with transaction.atomic():
self.save_instance(instance, dry_run)
self.save_m2m(instance, row, dry_run)
# Add object info to RowResult for LogEntry
row_result.object_repr = force_text(instance)
row_result.object_id = instance.pk
row_result.diff = self.get_diff(original, instance, dry_run)
except Exception as e:
# There is no point logging a transaction error for each row
# when only the original error is likely to be relevant
if not isinstance(e, TransactionManagementError):
logging.exception(e)
tb_info = traceback.format_exc()
row_result.errors.append(self.get_error_result_class()(e, tb_info, row))
return row_result
@atomic()
def import_data(self, dataset, dry_run=False, raise_errors=False,
use_transactions=None, **kwargs):
"""
Imports data from ``tablib.Dataset``. Refer to :doc:`import_workflow`
for a more complete description of the whole import process.
:param dataset: A ``tablib.Dataset``
:param raise_errors: If ``True``, errors are raised as soon as they
occur; otherwise they are collected in the result.
:param use_transactions: If ``True`` import process will be processed
inside transaction.
:param dry_run: If ``dry_run`` is set, or an error occurs, the transaction
will be rolled back.
"""
result = self.get_result_class()()
result.diff_headers = self.get_diff_headers()
result.totals = OrderedDict([(RowResult.IMPORT_TYPE_NEW, 0),
(RowResult.IMPORT_TYPE_UPDATE, 0),
(RowResult.IMPORT_TYPE_DELETE, 0),
(RowResult.IMPORT_TYPE_SKIP, 0),
(RowResult.IMPORT_TYPE_ERROR, 0),
('total', len(dataset))])
if use_transactions is None:
use_transactions = self.get_use_transactions()
if use_transactions is True:
# when transactions are used we still want to create/update/delete objects,
# because the whole transaction will be rolled back if dry_run is set
real_dry_run = False
sp1 = savepoint()
else:
real_dry_run = dry_run
try:
dataset = self.before_import(dataset, real_dry_run, **kwargs)
except Exception as e:
logging.exception(e)
tb_info = traceback.format_exc()
result.base_errors.append(self.get_error_result_class()(e, tb_info))
if raise_errors:
if use_transactions:
savepoint_rollback(sp1)
raise
instance_loader = self._meta.instance_loader_class(self, dataset)
# Update the total in case the dataset was altered by before_import()
result.totals['total'] = len(dataset)
for row in dataset.dict:
row_result = self.import_row(row, instance_loader, real_dry_run, **kwargs)
if row_result.errors:
result.totals[row_result.IMPORT_TYPE_ERROR] += 1
if raise_errors:
if use_transactions:
savepoint_rollback(sp1)
raise row_result.errors[-1].error
else:
result.totals[row_result.import_type] += 1
if (row_result.import_type != RowResult.IMPORT_TYPE_SKIP or
self._meta.report_skipped):
result.rows.append(row_result)
try:
self.after_import(dataset, result, real_dry_run, **kwargs)
except Exception as e:
logging.exception(e)
tb_info = traceback.format_exc()
result.base_errors.append(self.get_error_result_class()(e, tb_info))
if raise_errors:
if use_transactions:
savepoint_rollback(sp1)
raise
if use_transactions:
if dry_run or result.has_errors():
savepoint_rollback(sp1)
else:
savepoint_commit(sp1)
return result
def get_export_order(self):
order = tuple(self._meta.export_order or ())
return order + tuple(k for k in self.fields.keys() if k not in order)
def export_field(self, field, obj):
field_name = self.get_field_name(field)
method = getattr(self, 'dehydrate_%s' % field_name, None)
if method is not None:
return method(obj)
return field.export(obj)
def export_resource(self, obj):
return [self.export_field(field, obj) for field in self.get_fields()]
def get_export_headers(self):
headers = [
force_text(field.column_name) for field in self.get_fields()]
return headers
def get_user_visible_fields(self):
return self.get_fields()
def export(self, queryset=None):
"""
Exports a resource.
"""
if queryset is None:
queryset = self.get_queryset()
headers = self.get_export_headers()
data = tablib.Dataset(headers=headers)
if isinstance(queryset, QuerySet):
# Iterate without the queryset cache, to avoid wasting memory when
# exporting large datasets.
iterable = queryset.iterator()
else:
iterable = queryset
for obj in iterable:
data.append(self.export_resource(obj))
return data
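# --- Usage sketch (illustrative only; not part of the library) ------------
# A minimal Resource subclass showing two of the hooks documented above:
# a ``dehydrate_<fieldname>`` method (consulted by ``export_field``) and
# ``Meta.skip_unchanged`` (consulted by ``skip_row``).  ``Field`` is the
# field class imported at the top of this module; the field and attribute
# names below are hypothetical.
class _ExampleSongResource(Resource):
    title_upper = Field(column_name='title_upper')

    class Meta:
        skip_unchanged = True    # unchanged rows become IMPORT_TYPE_SKIP
        report_skipped = False   # and are omitted from result.rows

    def dehydrate_title_upper(self, obj):
        # export_field() prefers this method over Field.export()
        return getattr(obj, 'title', '').upper()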
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
new_class = super(ModelDeclarativeMetaclass,
cls).__new__(cls, name, bases, attrs)
opts = new_class._meta
if not opts.instance_loader_class:
opts.instance_loader_class = ModelInstanceLoader
if opts.model:
model_opts = opts.model._meta
declared_fields = new_class.fields
field_list = []
for f in sorted(model_opts.fields + model_opts.many_to_many):
if opts.fields is not None and not f.name in opts.fields:
continue
if opts.exclude and f.name in opts.exclude:
continue
if f.name in declared_fields:
continue
field = new_class.field_from_django_field(f.name, f,
readonly=False)
field_list.append((f.name, field, ))
new_class.fields.update(OrderedDict(field_list))
# add fields that follow relationships
if opts.fields is not None:
field_list = []
for field_name in opts.fields:
if field_name in declared_fields:
continue
if field_name.find('__') == -1:
continue
model = opts.model
attrs = field_name.split('__')
for i, attr in enumerate(attrs):
verbose_path = ".".join([opts.model.__name__] + attrs[0:i+1])
try:
if VERSION >= (1, 8):
f = model._meta.get_field(attr)
else:
f = model._meta.get_field_by_name(attr)[0]
except FieldDoesNotExist as e:
logging.exception(e)
raise FieldDoesNotExist(
"%s: %s has no field named '%s'" %
(verbose_path, model.__name__, attr))
if i < len(attrs) - 1:
# We're not at the last attribute yet, so check
# that we're looking at a relation, and move on to
# the next model.
if isinstance(f, ForeignObjectRel):
if RelatedObject is None:
model = f.related_model
else:
# Django < 1.8
model = f.model
else:
if f.rel is None:
raise KeyError(
'%s is not a relation' % verbose_path)
model = f.rel.to
if isinstance(f, ForeignObjectRel):
f = f.field
field = new_class.field_from_django_field(field_name, f,
readonly=True)
field_list.append((field_name, field))
new_class.fields.update(OrderedDict(field_list))
return new_class
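# --- Usage sketch (illustrative only) --------------------------------------
# How the double-underscore handling above is typically exercised: listing a
# related field such as 'author__name' in Meta.fields makes the metaclass
# follow the relation and add a read-only field for it.  ``Book`` and its
# ``author`` ForeignKey are hypothetical; the import path is an assumption.
def _example_related_field_resource():  # pragma: no cover - sketch only
    from myapp.models import Book  # hypothetical model with an ``author`` FK

    class BookResource(ModelResource):
        class Meta:
            model = Book
            fields = ('id', 'name', 'author__name')

    return BookResource().export()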
class ModelResource(six.with_metaclass(ModelDeclarativeMetaclass, Resource)):
"""
ModelResource is Resource subclass for handling Django models.
"""
@classmethod
def widget_from_django_field(cls, f, default=widgets.Widget):
"""
Returns the widget that would likely be associated with each
Django type.
"""
result = default
internal_type = f.get_internal_type()
if internal_type in ('ManyToManyField', ):
result = functools.partial(widgets.ManyToManyWidget,
model=f.rel.to)
if internal_type in ('ForeignKey', 'OneToOneField', ):
result = functools.partial(widgets.ForeignKeyWidget,
model=f.rel.to)
if internal_type in ('DecimalField', ):
result = widgets.DecimalWidget
if internal_type in ('DateTimeField', ):
result = widgets.DateTimeWidget
elif internal_type in ('DateField', ):
result = widgets.DateWidget
elif internal_type in ('TimeField', ):
result = widgets.TimeWidget
elif internal_type in ('FloatField',):
result = widgets.FloatWidget
elif internal_type in ('IntegerField', 'PositiveIntegerField',
'BigIntegerField', 'PositiveSmallIntegerField',
'SmallIntegerField', 'AutoField'):
result = widgets.IntegerWidget
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = widgets.BooleanWidget
elif VERSION >= (1, 8):
if type(f) == ArrayField:
return widgets.SimpleArrayWidget
return result
@classmethod
def widget_kwargs_for_field(cls, field_name):
"""
Returns widget kwargs for given field_name.
"""
if cls._meta.widgets:
return cls._meta.widgets.get(field_name, {})
return {}
@classmethod
def field_from_django_field(cls, field_name, django_field, readonly):
"""
Returns a Resource Field instance for the given Django model field.
"""
FieldWidget = cls.widget_from_django_field(django_field)
widget_kwargs = cls.widget_kwargs_for_field(field_name)
field = Field(
attribute=field_name,
column_name=field_name,
widget=FieldWidget(**widget_kwargs),
readonly=readonly,
default=django_field.default,
)
return field
def get_import_id_fields(self):
"""
"""
return self._meta.import_id_fields
def get_queryset(self):
"""
Returns a queryset of all objects for this model. Override this if you
want to limit the returned queryset.
"""
return self._meta.model.objects.all()
def init_instance(self, row=None):
"""
Initializes a new Django model.
"""
return self._meta.model()
def after_import(self, dataset, result, dry_run, **kwargs):
"""
Reset the SQL sequences after new objects are imported
"""
# Adapted from django's loaddata
if not dry_run and any(r.import_type == RowResult.IMPORT_TYPE_NEW for r in result.rows):
connection = connections[DEFAULT_DB_ALIAS]
sequence_sql = connection.ops.sequence_reset_sql(no_style(), [self._meta.model])
if sequence_sql:
cursor = connection.cursor()
try:
for line in sequence_sql:
cursor.execute(line)
finally:
cursor.close()
def modelresource_factory(model, resource_class=ModelResource):
"""
Factory for creating ``ModelResource`` class for given Django model.
"""
attrs = {'model': model}
Meta = type(str('Meta'), (object,), attrs)
class_name = model.__name__ + str('Resource')
class_attrs = {
'Meta': Meta,
}
metaclass = ModelDeclarativeMetaclass
return metaclass(class_name, (resource_class,), class_attrs)
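# --- Usage sketch (illustrative only) --------------------------------------
# modelresource_factory() builds a ModelResource subclass on the fly; the
# model and import path below are assumptions made for the example.
def _example_factory_export():  # pragma: no cover - sketch only
    from myapp.models import Book  # hypothetical model
    BookResource = modelresource_factory(Book)
    dataset = BookResource().export()  # tablib.Dataset, one row per Book
    return dataset.csv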
|
|
# $Id: io.py 8129 2017-06-27 14:55:22Z grubert $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.utils.error_reporting import locale_encoding, ErrorString, ErrorOutput
class InputError(IOError): pass
class OutputError(IOError): pass
def check_encoding(stream, encoding):
"""Test, whether the encoding of `stream` matches `encoding`.
Returns
:None: if `encoding` or `stream.encoding` are not a valid encoding
argument (e.g. ``None``) or `stream.encoding is missing.
:True: if the encoding argument resolves to the same value as `encoding`,
:False: if the encodings differ.
"""
try:
return codecs.lookup(stream.encoding) == codecs.lookup(encoding)
except (LookupError, AttributeError, TypeError):
return None
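# Illustrative sketch of check_encoding(): compare a stream's declared
# encoding with an expected one.  The file path is a placeholder.
def _example_check_encoding():  # pragma: no cover - sketch only
    stream = codecs.open('example.txt', encoding='latin-1')
    try:
        return check_encoding(stream, 'utf-8')  # -> False (encodings differ)
    finally:
        stream.close()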
class Input(TransformSpec):
"""
Abstract base class for input wrappers.
"""
component_type = 'input'
default_source_path = None
def __init__(self, source=None, source_path=None, encoding=None,
error_handler='strict'):
self.encoding = encoding
"""Text encoding for the input source."""
self.error_handler = error_handler
"""Text decoding error handler."""
self.source = source
"""The source of input data."""
self.source_path = source_path
"""A text reference to the source."""
if not source_path:
self.source_path = self.default_source_path
self.successful_encoding = None
"""The encoding that successfully decoded the source data."""
def __repr__(self):
return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
self.source_path)
def read(self):
raise NotImplementedError
def decode(self, data):
"""
Decode a string, `data`, heuristically.
Raise UnicodeError if unsuccessful.
The client application should call ``locale.setlocale`` at the
beginning of processing::
locale.setlocale(locale.LC_ALL, '')
"""
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, str), (
'input encoding is "unicode" '
'but input is not a unicode object')
if isinstance(data, str):
# Accept unicode even if self.encoding != 'unicode'.
return data
if self.encoding:
# We believe the user/application when the encoding is
# explicitly given.
encodings = [self.encoding]
else:
data_encoding = self.determine_encoding_from_data(data)
if data_encoding:
# If the data declares its encoding (explicitly or via a BOM),
# we believe it.
encodings = [data_encoding]
else:
# Apply heuristics only if no encoding is explicitly given and
# no BOM found. Start with UTF-8, because that only matches
# data that *IS* UTF-8:
encodings = ['utf-8', 'latin-1']
if locale_encoding:
encodings.insert(1, locale_encoding)
for enc in encodings:
try:
decoded = str(data, enc, self.error_handler)
self.successful_encoding = enc
# Return decoded, removing BOMs.
return decoded.replace('\ufeff', '')
except (UnicodeError, LookupError) as err:
error = err # in Python 3, the <exception instance> is
# local to the except clause
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: '
'%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
ErrorString(error)))
coding_slug = re.compile(b(r"coding[:=]\s*([-\w.]+)"))
"""Encoding declaration pattern."""
byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5
(codecs.BOM_UTF16_BE, 'utf-16-be'),
(codecs.BOM_UTF16_LE, 'utf-16-le'),)
"""Sequence of (start_bytes, encoding) tuples for encoding detection.
The first bytes of input data are checked against the start_bytes strings.
A match indicates the given encoding."""
def determine_encoding_from_data(self, data):
"""
Try to determine the encoding of `data` by looking *in* `data`.
Check for a byte order mark (BOM) or an encoding declaration.
"""
# check for a byte order mark:
for start_bytes, encoding in self.byte_order_marks:
if data.startswith(start_bytes):
return encoding
# check for an encoding declaration pattern in first 2 lines of file:
for line in data.splitlines()[:2]:
match = self.coding_slug.search(line)
if match:
return match.group(1).decode('ascii')
return None
class Output(TransformSpec):
"""
Abstract base class for output wrappers.
"""
component_type = 'output'
default_destination_path = None
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict'):
self.encoding = encoding
"""Text encoding for the output destination."""
self.error_handler = error_handler or 'strict'
"""Text encoding error handler."""
self.destination = destination
"""The destination for output data."""
self.destination_path = destination_path
"""A text reference to the destination."""
if not destination_path:
self.destination_path = self.default_destination_path
def __repr__(self):
return ('%s: destination=%r, destination_path=%r'
% (self.__class__, self.destination, self.destination_path))
def write(self, data):
"""`data` is a Unicode string, to be encoded by `self.encode`."""
raise NotImplementedError
def encode(self, data):
if self.encoding and self.encoding.lower() == 'unicode':
assert isinstance(data, str), (
'the encoding given is "unicode" but the output is not '
'a Unicode string')
return data
if not isinstance(data, str):
# Non-unicode (e.g. bytes) output.
return data
else:
return data.encode(self.encoding, self.error_handler)
class FileInput(Input):
"""
Input for single, simple file-like objects.
"""
def __init__(self, source=None, source_path=None,
encoding=None, error_handler='strict',
autoclose=True, mode='rU', **kwargs):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
`None` (which implies `sys.stdin` if no `source_path` given).
- `source_path`: a path to a file, which is opened and then read.
- `encoding`: the expected text encoding of the input file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after read (except when
`sys.stdin` is the source).
- `mode`: how the file is to be opened (see standard function
`open`). The default 'rU' provides universal newline support
for text files.
"""
Input.__init__(self, source, source_path, encoding, error_handler)
self.autoclose = autoclose
self._stderr = ErrorOutput()
# deprecation warning
for key in kwargs:
if key == 'handle_io_errors':
sys.stderr.write('deprecation warning: '
'io.FileInput() argument `handle_io_errors` '
'is ignored since "Docutils 0.10 (2012-12-16)" '
'and will soon be removed.')
else:
raise TypeError('__init__() got an unexpected keyword '
"argument '%s'" % key)
if source is None:
if source_path:
# Specify encoding in Python 3
if sys.version_info >= (3,0):
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.source = open(source_path, mode, **kwargs)
except IOError as error:
raise InputError(error.errno, error.strerror, source_path)
else:
self.source = sys.stdin
elif (sys.version_info >= (3,0) and
check_encoding(self.source, self.encoding) is False):
# TODO: re-open, warn or raise error?
raise UnicodeError('Encoding clash: encoding given is "%s" '
'but source is opened with encoding "%s".' %
(self.encoding, self.source.encoding))
if not source_path:
try:
self.source_path = self.source.name
except AttributeError:
pass
def read(self):
"""
Read and decode a single file and return the data (Unicode string).
"""
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
if self.source is sys.stdin and sys.version_info >= (3,0):
# read as binary data to circumvent auto-decoding
data = self.source.buffer.read()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
data = self.source.read()
except (UnicodeError, LookupError) as err: # (in Py3k read() decodes)
if not self.encoding and self.source_path:
# re-read in binary mode and decode with heuristics
b_source = open(self.source_path, 'rb')
data = b_source.read()
b_source.close()
# normalize newlines
data = b('\n').join(data.splitlines()) + b('\n')
else:
raise
finally:
if self.autoclose:
self.close()
return self.decode(data)
def readlines(self):
"""
Return lines of a single file as list of Unicode strings.
"""
return self.read().splitlines(True)
def close(self):
if self.source is not sys.stdin:
self.source.close()
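# Illustrative sketch of FileInput: read and decode a file with an explicit
# encoding and a lenient error handler.  The path is a placeholder.
def _example_read_file():  # pragma: no cover - sketch only
    source = FileInput(source_path='example.rst',
                       encoding='utf-8', error_handler='replace')
    return source.read()  # Unicode string; file closed via autoclose=True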
class FileOutput(Output):
"""
Output for single, simple file-like objects.
"""
mode = 'w'
"""The mode argument for `open()`."""
# 'wb' for binary (e.g. OpenOffice) files (see also `BinaryFileOutput`).
# (Do not use binary mode ('wb') for text files, as this prevents the
# conversion of newlines to the system specific default.)
def __init__(self, destination=None, destination_path=None,
encoding=None, error_handler='strict', autoclose=True,
handle_io_errors=None, mode=None):
"""
:Parameters:
- `destination`: either a file-like object (which is written
directly) or `None` (which implies `sys.stdout` if no
`destination_path` given).
- `destination_path`: a path to a file, which is opened and then
written.
- `encoding`: the text encoding of the output file.
- `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after write (except when
`sys.stdout` or `sys.stderr` is the destination).
- `handle_io_errors`: ignored, deprecated, will be removed.
- `mode`: how the file is to be opened (see standard function
`open`). The default is 'w', providing universal newline
support for text files.
"""
Output.__init__(self, destination, destination_path,
encoding, error_handler)
self.opened = True
self.autoclose = autoclose
if mode is not None:
self.mode = mode
self._stderr = ErrorOutput()
if destination is None:
if destination_path:
self.opened = False
else:
self.destination = sys.stdout
elif (# destination is file-type object -> check mode:
mode and hasattr(self.destination, 'mode')
and mode != self.destination.mode):
print(('Warning: Destination mode "%s" '
'differs from specified mode "%s"' %
(self.destination.mode, mode)), file=self._stderr)
if not destination_path:
try:
self.destination_path = self.destination.name
except AttributeError:
pass
def open(self):
# Specify encoding in Python 3.
if sys.version_info >= (3,0) and 'b' not in self.mode:
kwargs = {'encoding': self.encoding,
'errors': self.error_handler}
else:
kwargs = {}
try:
self.destination = open(self.destination_path, self.mode, **kwargs)
except IOError as error:
raise OutputError(error.errno, error.strerror,
self.destination_path)
self.opened = True
def write(self, data):
"""Encode `data`, write it to a single file, and return it.
With Python 3 or binary output mode, `data` is returned unchanged,
except when specified encoding and output encoding differ.
"""
if not self.opened:
self.open()
if ('b' not in self.mode and sys.version_info < (3,0)
or check_encoding(self.destination, self.encoding) is False
):
data = self.encode(data)
if sys.version_info >= (3,0) and os.linesep != '\n':
data = data.replace(b('\n'), b(os.linesep)) # fix endings
try: # In Python < 2.5, try...except has to be nested in try...finally.
try:
self.destination.write(data)
except TypeError as e:
if sys.version_info >= (3,0) and isinstance(data, bytes):
try:
self.destination.buffer.write(data)
except AttributeError:
if check_encoding(self.destination,
self.encoding) is False:
raise ValueError('Encoding of %s (%s) differs \n'
' from specified encoding (%s)' %
(self.destination_path or 'destination',
self.destination.encoding, self.encoding))
else:
raise e
except (UnicodeError, LookupError) as err:
raise UnicodeError(
'Unable to encode output data. output-encoding is: '
'%s.\n(%s)' % (self.encoding, ErrorString(err)))
finally:
if self.autoclose:
self.close()
return data
def close(self):
if self.destination not in (sys.stdout, sys.stderr):
self.destination.close()
self.opened = False
class BinaryFileOutput(FileOutput):
"""
A version of docutils.io.FileOutput which writes to a binary file.
"""
# Used by core.publish_cmdline_to_binary() which in turn is used by
# rst2odt (OpenOffice writer)
mode = 'wb'
class StringInput(Input):
"""
Direct string input.
"""
default_source_path = '<string>'
def read(self):
"""Decode and return the source string."""
return self.decode(self.source)
class StringOutput(Output):
"""
Direct string output.
"""
default_destination_path = '<string>'
def write(self, data):
"""Encode `data`, store it in `self.destination`, and return it."""
self.destination = self.encode(data)
return self.destination
class NullInput(Input):
"""
Degenerate input: read nothing.
"""
default_source_path = 'null input'
def read(self):
"""Return a null string."""
return ''
class NullOutput(Output):
"""
Degenerate output: write nothing.
"""
default_destination_path = 'null output'
def write(self, data):
"""Do nothing ([don't even] send data to the bit bucket)."""
pass
class DocTreeInput(Input):
"""
Adapter for document tree input.
The document tree must be passed in the ``source`` parameter.
"""
default_source_path = 'doctree input'
def read(self):
"""Return the document tree."""
return self.source
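# Illustrative sketch tying the string classes together: decode bytes with
# StringInput (BOM detection via determine_encoding_from_data) and encode
# the result again with StringOutput.
def _example_string_roundtrip():  # pragma: no cover - sketch only
    raw = codecs.BOM_UTF8 + b'Hello, docutils!'
    text = StringInput(source=raw).read()               # BOM -> 'utf-8'
    return StringOutput(encoding='utf-8').write(text)   # back to bytes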
|
|
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, ignore_warnings, override_settings
from .models import Album, Book, City, Influence, Song, State, TwoAlbumFKAndAnE
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class MyAdmin(admin.ModelAdmin):
@classmethod
def check(cls, model, **kwargs):
return ['error!']
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes', 'admin_checks']
)
class SystemChecksTestCase(TestCase):
@override_settings(DEBUG=True)
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
admin.sites.system_check_errors = []
@override_settings(DEBUG=True)
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
admin.sites.system_check_errors = []
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin."),
hint=None,
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields.check(model=Song)
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
Refs #19445.
"""
errors = ValidFormFieldsets.check(model=Song)
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
Tests that a fieldset's 'fields' value must be a list or tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": "title" # not a tuple
}),
]
errors = NotATupleAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fieldsets[1]['fields']' must be a list or tuple.",
hint=None,
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
Tests that the second fieldset's 'fields' value must also be a list or tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
"fields": ("title",)
}),
('foo', {
"fields": "author" # not a tuple
}),
]
errors = NotATupleAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fieldsets[1]['fields']' must be a list or tuple.",
hint=None,
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin.check(model=Album)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin.check(model=Album)
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
Ensure that a model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
hint=None,
obj=BookInline,
id='admin.E301',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"A GenericInlineModelAdmin raises problems if the ct_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E302',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
"A GenericInlineModelAdmin raises problems if the ct_fk_field points to a non-existent field."
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
hint=None,
obj=InfluenceInline,
id='admin.E303',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_ct_field(self):
"A GenericInlineModelAdmin raises problems if the ct_field points to a field that isn't part of a GenericForeignKey"
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using content type field 'name' and object ID field 'object_id'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_non_gfk_fk_field(self):
"A GenericInlineModelAdmin raises problems if the ct_fk_field points to a field that isn't part of a GenericForeignKey"
class InfluenceInline(GenericStackedInline):
model = Influence
ct_fk_field = 'name'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
"'admin_checks.Influence' has no GenericForeignKey using content type field 'content_type' and object ID field 'name'.",
hint=None,
obj=InfluenceInline,
id='admin.E304',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
"""
Regression test for #15669 - Include app label in admin system check messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin.check(model=Album)
expected = [
checks.Error(
("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
"not an attribute of 'admin_checks.Album'."),
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing fk exclusion (when exclude is
given), make sure fk_name is honored; otherwise things blow up when there
is more than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_nonexistent_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistent")
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistent_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist'] # Missing attribute
errors = CityInline.check(State)
expected = [
checks.Error(
("The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'."),
hint=None,
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able to use
Model.m2m_field.through.
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression test ensuring ModelAdmin.fields can contain non-model fields
(this broke with r11737).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression test ensuring ModelAdmin.fields can handle the first element
being a non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
@ignore_warnings(module='django.contrib.admin.options')
def test_validator_compatibility(self):
class MyValidator(object):
def validate(self, cls, model):
raise ImproperlyConfigured("error!")
class MyModelAdmin(admin.ModelAdmin):
validator_class = MyValidator
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
'error!',
hint=None,
obj=MyModelAdmin,
)
]
self.assertEqual(errors, expected)
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
"""
Ensure list_filter can access reverse fields even when the app registry
is not ready; refs #24146.
"""
class BookAdminWithListFilter(admin.ModelAdmin):
list_filter = ['authorsbooks__featured']
# Temporarily pretending apps are not ready yet. This issue can happen
# if the value of 'list_filter' refers to a 'through__field'.
Book._meta.apps.ready = False
try:
errors = BookAdminWithListFilter.check(model=Book)
self.assertEqual(errors, [])
finally:
Book._meta.apps.ready = True
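# Illustrative sketch (not part of the test suite): the pattern exercised
# above -- calling ``SomeAdmin.check(model=SomeModel)`` -- can also be used
# in ad-hoc scripts to validate an admin class before registering it.
def _example_run_admin_checks():  # pragma: no cover - sketch only
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ('title',)
    return SongAdmin.check(model=Song)  # an empty list means no problems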
|
|
# Tests `interval.py`.
# Copyright (c) 2018 Aubrey Barnard. This is free software. See
# LICENSE for details.
import datetime
import unittest
from ..interval import AllenRelation, Interval, CompoundInterval
class AllenRelationTest(unittest.TestCase):
def test_inverse(self):
self.assertEqual(AllenRelation.before.inverse(),
AllenRelation.after)
self.assertEqual(AllenRelation.abut_before.inverse(),
AllenRelation.abut_after)
self.assertEqual(AllenRelation.overlap_before.inverse(),
AllenRelation.overlap_after)
self.assertEqual(AllenRelation.outside_end.inverse(),
AllenRelation.inside_end)
self.assertEqual(AllenRelation.outside.inverse(),
AllenRelation.inside)
self.assertEqual(AllenRelation.inside_begin.inverse(),
AllenRelation.outside_begin)
self.assertEqual(AllenRelation.equal.inverse(),
AllenRelation.equal)
self.assertEqual(AllenRelation.outside_begin.inverse(),
AllenRelation.inside_begin)
self.assertEqual(AllenRelation.inside.inverse(),
AllenRelation.outside)
self.assertEqual(AllenRelation.inside_end.inverse(),
AllenRelation.outside_end)
self.assertEqual(AllenRelation.overlap_after.inverse(),
AllenRelation.overlap_before)
self.assertEqual(AllenRelation.abut_after.inverse(),
AllenRelation.abut_before)
self.assertEqual(AllenRelation.after.inverse(),
AllenRelation.before)
def test_is_inverse(self):
self.assertTrue(AllenRelation.equal.is_inverse(
AllenRelation.equal))
self.assertTrue(AllenRelation.before.is_inverse(
AllenRelation.after))
self.assertFalse(AllenRelation.before.is_inverse(
AllenRelation.inside_end))
class AllenAlgebraTest(unittest.TestCase):
def test_before(self):
itvl1 = Interval(32, 56)
itvl2 = Interval(81, 97)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.before, rel)
def test_abut_before(self):
itvl1 = Interval(15, 64)
itvl2 = Interval(64, 80)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.abut_before, rel)
def test_abut_before_empty(self):
itvl1 = Interval(32, 32)
itvl2 = Interval(32, 79)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.abut_before, rel)
def test_overlap_before(self):
itvl1 = Interval(5, 57)
itvl2 = Interval(20, 88)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.overlap_before, rel)
def test_outside_end(self):
itvl1 = Interval(2, 99)
itvl2 = Interval(6, 99)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.outside_end, rel)
def test_outside(self):
itvl1 = Interval(1, 76)
itvl2 = Interval(51, 72)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.outside, rel)
def test_inside_begin(self):
itvl1 = Interval(35, 59)
itvl2 = Interval(35, 64)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.inside_begin, rel)
def test_equal(self):
itvl1 = Interval(11, 78)
itvl2 = Interval(11, 78)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.equal, rel)
def test_outside_begin(self):
itvl1 = Interval(65, 84)
itvl2 = Interval(65, 69)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.outside_begin, rel)
def test_inside(self):
itvl1 = Interval(43, 54)
itvl2 = Interval(26, 95)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.inside, rel)
def test_inside_end(self):
itvl1 = Interval(67, 73)
itvl2 = Interval(8, 73)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.inside_end, rel)
def test_overlap_after(self):
itvl1 = Interval(38, 90)
itvl2 = Interval(1, 71)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.overlap_after, rel)
def test_abut_after_empty(self):
itvl1 = Interval(20, 20)
itvl2 = Interval(1, 20)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.abut_after, rel)
def test_abut_after(self):
itvl1 = Interval(7, 32)
itvl2 = Interval(5, 7)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.abut_after, rel)
def test_after(self):
itvl1 = Interval(90, 95)
itvl2 = Interval(15, 33)
rel = itvl1.allen_relation(itvl2)
self.assertEqual(AllenRelation.after, rel)
class IntervalTest(unittest.TestCase):
def test_construct_bad_lo_hi(self):
for lo, hi in (
(2, 1),
('2018-11-01', '2018-10-31'),
(datetime.date(2018, 11, 1),
datetime.date(2018, 10, 31)),
):
with self.assertRaises(ValueError):
Interval(lo, hi)
# Negative length should also result in the same error
with self.assertRaises(ValueError):
Interval(2, length=-1)
def test_construct_compute_length(self):
# Type that supports subtraction
i = Interval(3, 8)
self.assertEqual(3, i.lo)
self.assertEqual(8, i.hi)
self.assertEqual(5, i.length())
# Type that does not support subtraction
i = Interval('2018-10-31', '2018-11-01')
self.assertEqual('2018-10-31', i.lo)
self.assertEqual('2018-11-01', i.hi)
self.assertEqual(None, i.length())
def test_construct_from_lo_length(self):
i = Interval(3, length=5)
self.assertEqual(3, i.lo)
self.assertEqual(8, i.hi)
self.assertEqual(5, i.length())
i = Interval(datetime.date(2018, 10, 31),
length=datetime.timedelta(1))
self.assertEqual(datetime.date(2018, 10, 31), i.lo)
self.assertEqual(datetime.date(2018, 11, 1), i.hi)
self.assertEqual(datetime.timedelta(1), i.length())
# Non-integer zero length. The type of the zero must be
# preserved.
i = Interval(datetime.date(2018, 10, 31),
length=datetime.timedelta(0))
self.assertEqual(datetime.date(2018, 10, 31), i.lo)
self.assertEqual(datetime.date(2018, 10, 31), i.hi)
self.assertEqual(datetime.timedelta(0), i.length())
def test_construct_point(self):
# Types that support subtraction
i = Interval(3)
self.assertEqual(3, i.lo)
self.assertEqual(3, i.hi)
self.assertEqual(0, i.length())
i = Interval(datetime.date(2018, 10, 31))
self.assertEqual(datetime.date(2018, 10, 31), i.lo)
self.assertEqual(datetime.date(2018, 10, 31), i.hi)
self.assertEqual(datetime.timedelta(0), i.length())
# Type that does not support subtraction
i = Interval('2018-10-31')
self.assertEqual('2018-10-31', i.lo)
self.assertEqual('2018-10-31', i.hi)
self.assertEqual(0, i.length())
def test_is_point(self):
i = Interval(3)
self.assertTrue(i.is_point(), i)
i = Interval(3, lo_open=True)
self.assertFalse(i.is_point(), i)
i = Interval(3, hi_open=True)
self.assertFalse(i.is_point(), i)
def test_is_empty(self):
i = Interval(3)
self.assertFalse(i.is_empty(), i)
i = Interval(3, lo_open=True)
self.assertTrue(i.is_empty(), i)
i = Interval(3, hi_open=True)
self.assertTrue(i.is_empty(), i)
_orderings = (
# Empty (empty is always less than non-empty)
(Interval(52, lo_open=True), Interval(5, 21), 'lt'),
(Interval(50, lo_open=True), Interval(50, lo_open=True), 'eq'),
(Interval(2, lo_open=True), Interval(46, lo_open=True), 'eq'),
(Interval(7, 26), Interval(27, lo_open=True), 'gt'),
# Before
(Interval(8, 54), Interval(61, 98), 'lt'),
# Abut before
(Interval(46, 50), Interval(50, 57), 'lt'),
# Overlap before
(Interval(39, 60), Interval(57, 71), 'lt'),
# Outside end
(Interval(5, 54, True, True), Interval(47, 54, True, True), 'lt'),
(Interval(7, 85, True, True), Interval(83, 85, True, False), 'lt'),
(Interval(21, 99, True, False), Interval(46, 99, True, True), 'lt'),
(Interval(1, 25, True, False), Interval(20, 25, True, False), 'lt'),
# Outside
(Interval(3, 70), Interval(36, 67), 'lt'),
# Inside begin
(Interval(29, 73, False, True), Interval(29, 96, False, True), 'lt'),
(Interval(2, 25, False, True), Interval(2, 43, True, True), 'lt'),
(Interval(52, 62, True, True), Interval(52, 92, False, True), 'gt'),
(Interval(2, 7, True, True), Interval(2, 47, True, True), 'lt'),
# Equal
(Interval(30, 36, False, True), Interval(30, 36, False, True), 'eq'),
(Interval(1, 9, False, True), Interval(1, 9, False, False), 'lt'),
(Interval(37, 39, False, True), Interval(37, 39, True, True), 'lt'),
(Interval(11, 42, False, True), Interval(11, 42, True, False), 'lt'),
(Interval(9, 73, False, False), Interval(9, 73, False, True), 'gt'),
(Interval(35, 49, False, False), Interval(35, 49, False, False), 'eq'),
(Interval(18, 82, False, False), Interval(18, 82, True, True), 'lt'),
(Interval(16, 40, False, False), Interval(16, 40, True, False), 'lt'),
(Interval(66, 94, True, True), Interval(66, 94, False, True), 'gt'),
(Interval(34, 43, True, True), Interval(34, 43, False, False), 'gt'),
(Interval(54, 75, True, True), Interval(54, 75, True, True), 'eq'),
(Interval(35, 48, True, True), Interval(35, 48, True, False), 'lt'),
(Interval(44, 63, True, False), Interval(44, 63, False, True), 'gt'),
(Interval(9, 13, True, False), Interval(9, 13, False, False), 'gt'),
(Interval(3, 50, True, False), Interval(3, 50, True, True), 'gt'),
(Interval(22, 33, True, False), Interval(22, 33, True, False), 'eq'),
# Outside begin
(Interval(60, 90, False, True), Interval(60, 87, False, True), 'gt'),
(Interval(17, 45, False, True), Interval(17, 38, True, True), 'lt'),
(Interval(62, 76, True, True), Interval(62, 70, False, True), 'gt'),
(Interval(16, 54, True, True), Interval(16, 34, True, True), 'gt'),
# Inside
(Interval(57, 58), Interval(0, 95), 'gt'),
# Inside end
(Interval(44, 49, True, True), Interval(25, 49, True, True), 'gt'),
(Interval(25, 73, True, True), Interval(7, 73, True, False), 'gt'),
(Interval(81, 94, True, False), Interval(12, 94, True, True), 'gt'),
(Interval(82, 96, True, False), Interval(41, 96, True, False), 'gt'),
# Overlap after
(Interval(29, 96), Interval(28, 42), 'gt'),
# Abut after
(Interval(42, 72), Interval(41, 42), 'gt'),
# After
(Interval(97, 99), Interval(86, 87), 'gt'),
)
def test___eq__(self):
for i1, i2, cmp_exp in IntervalTest._orderings:
cmp_act = i1 == i2
self.assertEqual(cmp_exp == 'eq', cmp_act, (i1, i2))
# Check with different type
self.assertNotEqual(Interval(1, 2), 3)
def test___lt__(self):
for i1, i2, cmp_exp in IntervalTest._orderings:
cmp_act = i1 < i2
self.assertEqual(cmp_exp == 'lt', cmp_act, (i1, i2))
def test___le__(self):
for i1, i2, cmp_exp in IntervalTest._orderings:
cmp_act = i1 <= i2
self.assertEqual(cmp_exp in ('lt', 'eq'), cmp_act, (i1, i2))
def test___gt__(self):
for i1, i2, cmp_exp in IntervalTest._orderings:
cmp_act = i1 > i2
self.assertEqual(cmp_exp == 'gt', cmp_act, (i1, i2))
def test___ge__(self):
for i1, i2, cmp_exp in IntervalTest._orderings:
cmp_act = i1 >= i2
self.assertEqual(cmp_exp in ('gt', 'eq'), cmp_act, (i1, i2))
_subsets = (
# Empty
(Interval(0, lo_open=True), Interval(1, lo_open=True), True),
(Interval(0, lo_open=True), Interval(1), True),
(Interval(0, lo_open=True), Interval(1, 3), True),
# Before / After
(Interval(24, 69), Interval(83, 86), False),
(Interval(50, 65), Interval(41, 43), False),
# Abut
(Interval(0, 82), Interval(82, 83), False),
(Interval(36, 81), Interval(20, 36), False),
# Overlap
(Interval(3, 28), Interval(4, 99), False),
(Interval(87, 98), Interval(42, 95), False),
# Inside / Outside
(Interval(55, 93), Interval(16, 94, True, True), True),
(Interval(2, 68), Interval(7, 50, True, True), False),
# Start
(Interval(7, 11, True, False), Interval(7, 18, True, True),
True),
(Interval(80, 93, True, False), Interval(80, 97, False, True),
True),
(Interval(36, 90, False, False), Interval(36, 96, True, True),
False),
(Interval(32, 82, False, False), Interval(32, 99, False, True),
True),
# Finish
(Interval(64, 89, False, True), Interval(23, 89, True, True),
True),
(Interval(54, 90, False, True), Interval(42, 90, True, False),
True),
(Interval(40, 73, False, False), Interval(27, 73, True, True),
False),
(Interval(77, 89, False, False), Interval(25, 89, True, False),
True),
# Equal
(Interval(22, 39, True, True), Interval(22, 39, True, True),
True),
(Interval(43, 74, True, False), Interval(43, 74, True, True),
False),
(Interval(43, 92, False, True), Interval(43, 92, True, True),
False),
(Interval(21, 57, False, False), Interval(21, 57, True, True),
False),
(Interval(88, 93, True, False), Interval(88, 93, True, False),
True),
(Interval(27, 81, False, True), Interval(27, 81, False, True),
True),
(Interval(22, 85), Interval(22, 85), True),
)
def test_issubset(self):
for i1, i2, is_subset_of in IntervalTest._subsets:
if is_subset_of:
self.assertTrue(i1.issubset(i2), (i1, i2))
else:
self.assertFalse(i1.issubset(i2), (i1, i2))
_unions = (
# Empty
((Interval(3, lo_open=True), Interval(21, lo_open=True)),
Interval(3, lo_open=True)),
((Interval(37, lo_open=True), Interval(53, 96)),
Interval(53, 96)),
# Before / After
((Interval(11, 13), Interval(26, 62)),
(Interval(11, 13), Interval(26, 62))),
# Abut
((Interval(44, 45, True, True), Interval(45, 96, True, True)),
(Interval(44, 45, True, True), Interval(45, 96, True, True))),
((Interval(53, 88, True, False), Interval(88, 98, True, True)),
Interval(53, 98, True, True)),
((Interval(30, 60, True, True), Interval(60, 89, False, True)),
Interval(30, 89, True, True)),
((Interval(42, 64), Interval(64, 77)), Interval(42, 77)),
# Overlap
((Interval(20, 58, True, True), Interval(34, 86, True, True)),
Interval(20, 86, True, True)),
((Interval(0, 65, True, True), Interval(47, 98, True, False)),
Interval(0, 98, True, False)),
((Interval(66, 77, False, True), Interval(75, 95, True, True)),
Interval(66, 95, False, True)),
((Interval(14, 67, False, True), Interval(23, 75, True, False)),
Interval(14, 75)),
# Inside / Outside
((Interval(82, 83, True, True), Interval(68, 96, True, True)),
Interval(68, 96, True, True)),
((Interval(42, 59, True, True), Interval(10, 72, True, False)),
Interval(10, 72, True, False)),
((Interval(17, 26, True, True), Interval(15, 64, False, True)),
Interval(15, 64, False, True)),
((Interval(55, 59, True, True), Interval(3, 99)),
Interval(3, 99)),
# Start
((Interval(7, 78, True, True), Interval(7, 31, True, True)),
Interval(7, 78, True, True)),
((Interval(26, 97, False, True), Interval(26, 52, True, True)),
Interval(26, 97, False, True)),
((Interval(26, 27, True, True), Interval(26, 68, False, True)),
Interval(26, 68, False, True)),
((Interval(34, 37), Interval(34, 48)), Interval(34, 48)),
# Finish
((Interval(25, 44, True, True), Interval(7, 44, True, True)),
Interval(7, 44, True, True)),
((Interval(5, 77, True, True), Interval(29, 77, True, False)),
Interval(5, 77, True, False)),
((Interval(7, 51, True, False), Interval(45, 51, True, True)),
Interval(7, 51, True, False)),
((Interval(64, 91), Interval(81, 91)), Interval(64, 91)),
# Equal
((Interval(47, 90, True, True), Interval(47, 90, True, True)),
Interval(47, 90, True, True)),
((Interval(0, 61, True, False), Interval(0, 61, True, False)),
Interval(0, 61, True, False)),
((Interval(52, 54, False, True), Interval(52, 54, False, True)),
Interval(52, 54, False, True)),
((Interval(14, 20, False, False), Interval(14, 20, False, False)),
Interval(14, 20, False, False)),
# Multiple
((Interval(15, 30), Interval(17, 81), Interval(27, 83)),
Interval(15, 83)),
((Interval(2, 7), Interval(13, 84), Interval(87, 94)),
(Interval(2, 7), Interval(13, 84), Interval(87, 94))),
)
def test_union(self):
for intervals, u_exp in IntervalTest._unions:
for itvls in (intervals, tuple(reversed(intervals))):
u_act = itvls[0].union(*itvls[1:])
if isinstance(u_act, CompoundInterval):
u_act = tuple(u_act)
self.assertEqual(u_exp, u_act)
_intersections = (
# Empty
((Interval(28, lo_open=True), Interval(35, 87, True, True)),
Interval(0, lo_open=True)),
((Interval(39, lo_open=True), Interval(1, 61, True, True)),
Interval(0, lo_open=True)),
((Interval(94, lo_open=True), Interval(4, 69, True, True)),
Interval(0, lo_open=True)),
# Before / After
((Interval(31, 62), Interval(74, 91)),
Interval(0, lo_open=True)),
# Abut
((Interval(1, 43, True, True), Interval(43, 76, True, True)),
Interval(43, lo_open=True)),
((Interval(38, 97, True, False), Interval(97, 99, True, True)),
Interval(97, lo_open=True)),
((Interval(13, 19, True, True), Interval(19, 85, False, True)),
Interval(19, lo_open=True)),
((Interval(50, 53), Interval(53, 96)), Interval(53)),
# Overlap
((Interval(2, 56, True, True), Interval(5, 80, True, True)),
Interval(5, 56, True, True)),
((Interval(13, 63, True, False), Interval(22, 89, True, True)),
Interval(22, 63, True, False)),
((Interval(6, 63, True, True), Interval(59, 93, False, True)),
Interval(59, 63, False, True)),
((Interval(11, 58), Interval(24, 71)), Interval(24, 58)),
# Inside / Outside
((Interval(13, 53, True, True), Interval(9, 98, True, True)),
Interval(13, 53, True, True)),
((Interval(27, 31, True, False), Interval(7, 82, True, True)),
Interval(27, 31, True, False)),
((Interval(72, 79, False, True), Interval(66, 86, True, True)),
Interval(72, 79, False, True)),
((Interval(5, 14), Interval(4, 89)), Interval(5, 14)),
# Start
((Interval(27, 35, True, True), Interval(27, 39, True, True)),
Interval(27, 35, True, True)),
((Interval(0, 33, False, True), Interval(0, 98, True, True)),
Interval(0, 33, True, True)),
((Interval(29, 39, False, True), Interval(29, 47, False, True)),
Interval(29, 39, False, True)),
((Interval(2, 56), Interval(2, 80)), Interval(2, 56)),
# Finish
((Interval(12, 61, True, True), Interval(43, 61, True, True)),
Interval(43, 61, True, True)),
((Interval(49, 81, True, False), Interval(57, 81, True, True)),
Interval(57, 81, True, True)),
((Interval(6, 99, True, False), Interval(97, 99, True, False)),
Interval(97, 99, True, False)),
((Interval(11, 43), Interval(25, 43)), Interval(25, 43)),
# Equal
((Interval(70, 87, True, True), Interval(70, 87, True, True)),
Interval(70, 87, True, True)),
((Interval(37, 83, True, False), Interval(37, 83, True, False)),
Interval(37, 83, True, False)),
((Interval(5, 61, False, True), Interval(5, 61, False, True)),
Interval(5, 61, False, True)),
((Interval(6, 24), Interval(6, 24)), Interval(6, 24)),
# Multiple
((Interval(0, 53), Interval(37, 67), Interval(50, 73)),
Interval(50, 53)),
)
def test_intersection(self):
for intervals, i_exp in IntervalTest._intersections:
for itvls in (intervals, tuple(reversed(intervals))):
i_act = itvls[0].intersection(*itvls[1:])
self.assertEqual(i_exp, i_act)
def test_intersects(self):
for intervals, i_exp in IntervalTest._intersections:
for itvls in (intervals, tuple(reversed(intervals))):
self.assertEqual(not i_exp.is_empty(),
itvls[0].intersects(itvls[1]))
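# The tables above pin down the endpoint semantics expected of Interval.
# As a reading aid only, here is a minimal sketch of the binary intersection
# rule on plain (lo, hi, lo_open, hi_open) tuples; the helper name and tuple
# representation are hypothetical and are not part of the Interval API under test.
def _intersect_bounds(a, b):
    """Intersect two (lo, hi, lo_open, hi_open) tuples; return None if empty."""
    a_lo, a_hi, a_lo_open, a_hi_open = a
    b_lo, b_hi, b_lo_open, b_hi_open = b
    # The larger lower bound wins; on a tie an open endpoint beats a closed one.
    if a_lo > b_lo:
        lo, lo_open = a_lo, a_lo_open
    elif b_lo > a_lo:
        lo, lo_open = b_lo, b_lo_open
    else:
        lo, lo_open = a_lo, a_lo_open or b_lo_open
    # The smaller upper bound wins; on a tie an open endpoint beats a closed one.
    if a_hi < b_hi:
        hi, hi_open = a_hi, a_hi_open
    elif b_hi < a_hi:
        hi, hi_open = b_hi, b_hi_open
    else:
        hi, hi_open = a_hi, a_hi_open or b_hi_open
    if lo > hi or (lo == hi and (lo_open or hi_open)):
        return None  # empty; the table above appears to model this as a degenerate open Interval
    return (lo, hi, lo_open, hi_open)
# e.g. _intersect_bounds((13, 63, True, False), (22, 89, True, True)) == (22, 63, True, False),
# matching the (Interval(13, 63, True, False), Interval(22, 89, True, True)) case above.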
|
|
import unittest
from w3lib.encoding import resolve_encoding
from scrapy.http import Request, Response, TextResponse, HtmlResponse, XmlResponse, Headers
from scrapy.selector import Selector
class BaseResponseTest(unittest.TestCase):
response_class = Response
def test_init(self):
        # Response requires url in the constructor
self.assertRaises(Exception, self.response_class)
self.assertTrue(isinstance(self.response_class('http://example.com/'), self.response_class))
# body can be str or None
self.assertTrue(isinstance(self.response_class('http://example.com/', body=''), self.response_class))
self.assertTrue(isinstance(self.response_class('http://example.com/', body='body'), self.response_class))
# test presence of all optional parameters
self.assertTrue(isinstance(self.response_class('http://example.com/', headers={}, status=200, body=''), self.response_class))
r = self.response_class("http://www.example.com")
assert isinstance(r.url, str)
self.assertEqual(r.url, "http://www.example.com")
self.assertEqual(r.status, 200)
assert isinstance(r.headers, Headers)
self.assertEqual(r.headers, {})
headers = {"caca": "coco"}
body = "a body"
r = self.response_class("http://www.example.com", headers=headers, body=body)
assert r.headers is not headers
self.assertEqual(r.headers["caca"], "coco")
r = self.response_class("http://www.example.com", status=301)
self.assertEqual(r.status, 301)
r = self.response_class("http://www.example.com", status='301')
self.assertEqual(r.status, 301)
self.assertRaises(ValueError, self.response_class, "http://example.com", status='lala200')
def test_copy(self):
"""Test Response copy"""
r1 = self.response_class("http://www.example.com", body="Some body")
r1.flags.append('cached')
r2 = r1.copy()
self.assertEqual(r1.status, r2.status)
self.assertEqual(r1.body, r2.body)
# make sure flags list is shallow copied
assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical"
self.assertEqual(r1.flags, r2.flags)
# make sure headers attribute is shallow copied
assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical"
self.assertEqual(r1.headers, r2.headers)
def test_copy_meta(self):
req = Request("http://www.example.com")
req.meta['foo'] = 'bar'
r1 = self.response_class("http://www.example.com", body="Some body", request=req)
assert r1.meta is req.meta
def test_copy_inherited_classes(self):
"""Test Response children copies preserve their class"""
class CustomResponse(self.response_class):
pass
r1 = CustomResponse('http://www.example.com')
r2 = r1.copy()
assert type(r2) is CustomResponse
def test_replace(self):
"""Test Response.replace() method"""
hdrs = Headers({"key": "value"})
r1 = self.response_class("http://www.example.com")
r2 = r1.replace(status=301, body="New body", headers=hdrs)
assert r1.body == ''
self.assertEqual(r1.url, r2.url)
self.assertEqual((r1.status, r2.status), (200, 301))
self.assertEqual((r1.body, r2.body), ('', "New body"))
self.assertEqual((r1.headers, r2.headers), ({}, hdrs))
# Empty attributes (which may fail if not compared properly)
r3 = self.response_class("http://www.example.com", flags=['cached'])
r4 = r3.replace(body='', flags=[])
self.assertEqual(r4.body, '')
self.assertEqual(r4.flags, [])
def _assert_response_values(self, response, encoding, body):
if isinstance(body, unicode):
body_unicode = body
body_str = body.encode(encoding)
else:
body_unicode = body.decode(encoding)
body_str = body
assert isinstance(response.body, str)
self._assert_response_encoding(response, encoding)
self.assertEqual(response.body, body_str)
self.assertEqual(response.body_as_unicode(), body_unicode)
def _assert_response_encoding(self, response, encoding):
self.assertEqual(response.encoding, resolve_encoding(encoding))
def test_immutable_attributes(self):
r = self.response_class("http://example.com")
self.assertRaises(AttributeError, setattr, r, 'url', 'http://example2.com')
self.assertRaises(AttributeError, setattr, r, 'body', 'xxx')
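# A minimal usage sketch of the copy()/replace() behaviour asserted above.
# It is illustrative only (not collected by the test runner) and re-uses the
# Response class imported at the top of this file.
def _response_copy_replace_example():
    r1 = Response("http://www.example.com", body="Some body", flags=['cached'])
    r2 = r1.replace(status=301)  # new object; unspecified attributes such as url carry over
    r3 = r1.copy()               # flags and headers are shallow-copied
    assert (r1.status, r2.status) == (200, 301)
    assert r3.flags == r1.flags and r3.flags is not r1.flags
    return r1, r2, r3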
class ResponseText(BaseResponseTest):
def test_no_unicode_url(self):
self.assertRaises(TypeError, self.response_class, u'http://www.example.com')
class TextResponseTest(BaseResponseTest):
response_class = TextResponse
def test_replace(self):
super(TextResponseTest, self).test_replace()
r1 = self.response_class("http://www.example.com", body="hello", encoding="cp852")
r2 = r1.replace(url="http://www.example.com/other")
r3 = r1.replace(url="http://www.example.com/other", encoding="latin1")
assert isinstance(r2, self.response_class)
self.assertEqual(r2.url, "http://www.example.com/other")
self._assert_response_encoding(r2, "cp852")
self.assertEqual(r3.url, "http://www.example.com/other")
self.assertEqual(r3._declared_encoding(), "latin1")
def test_unicode_url(self):
        # instantiate with a unicode url and no encoding (should set the default encoding)
resp = self.response_class(u"http://www.example.com/")
self._assert_response_encoding(resp, self.response_class._DEFAULT_ENCODING)
# make sure urls are converted to str
resp = self.response_class(url=u"http://www.example.com/", encoding='utf-8')
assert isinstance(resp.url, str)
resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='utf-8')
self.assertEqual(resp.url, 'http://www.example.com/price/\xc2\xa3')
resp = self.response_class(url=u"http://www.example.com/price/\xa3", encoding='latin-1')
self.assertEqual(resp.url, 'http://www.example.com/price/\xa3')
resp = self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=utf-8"]})
self.assertEqual(resp.url, 'http://www.example.com/price/\xc2\xa3')
resp = self.response_class(u"http://www.example.com/price/\xa3", headers={"Content-type": ["text/html; charset=iso-8859-1"]})
self.assertEqual(resp.url, 'http://www.example.com/price/\xa3')
def test_unicode_body(self):
unicode_string = u'\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0447\u0435\u0441\u043a\u0438\u0439 \u0442\u0435\u043a\u0441\u0442'
self.assertRaises(TypeError, self.response_class, 'http://www.example.com', body=u'unicode body')
original_string = unicode_string.encode('cp1251')
r1 = self.response_class('http://www.example.com', body=original_string, encoding='cp1251')
# check body_as_unicode
self.assertTrue(isinstance(r1.body_as_unicode(), unicode))
self.assertEqual(r1.body_as_unicode(), unicode_string)
def test_encoding(self):
r1 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=utf-8"]}, body="\xc2\xa3")
r2 = self.response_class("http://www.example.com", encoding='utf-8', body=u"\xa3")
r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body="\xa3")
r4 = self.response_class("http://www.example.com", body="\xa2\xa3")
r5 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=None"]}, body="\xc2\xa3")
r6 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gb2312"]}, body="\xa8D")
r7 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=gbk"]}, body="\xa8D")
self.assertEqual(r1._headers_encoding(), "utf-8")
self.assertEqual(r2._headers_encoding(), None)
self.assertEqual(r2._declared_encoding(), 'utf-8')
self._assert_response_encoding(r2, 'utf-8')
self.assertEqual(r3._headers_encoding(), "cp1252")
self.assertEqual(r3._declared_encoding(), "cp1252")
self.assertEqual(r4._headers_encoding(), None)
self.assertEqual(r5._headers_encoding(), None)
self._assert_response_encoding(r5, "utf-8")
assert r4._body_inferred_encoding() is not None and r4._body_inferred_encoding() != 'ascii'
self._assert_response_values(r1, 'utf-8', u"\xa3")
self._assert_response_values(r2, 'utf-8', u"\xa3")
self._assert_response_values(r3, 'iso-8859-1', u"\xa3")
self._assert_response_values(r6, 'gb18030', u"\u2015")
self._assert_response_values(r7, 'gb18030', u"\u2015")
        # TextResponse (and subclasses) must be passed an encoding when instantiating with unicode bodies
self.assertRaises(TypeError, self.response_class, "http://www.example.com", body=u"\xa3")
def test_declared_encoding_invalid(self):
"""Check that unknown declared encodings are ignored"""
r = self.response_class("http://www.example.com",
headers={"Content-type": ["text/html; charset=UKNOWN"]},
body="\xc2\xa3")
self.assertEqual(r._declared_encoding(), None)
self._assert_response_values(r, 'utf-8', u"\xa3")
def test_utf16(self):
"""Test utf-16 because UnicodeDammit is known to have problems with"""
r = self.response_class("http://www.example.com",
body='\xff\xfeh\x00i\x00',
encoding='utf-16')
self._assert_response_values(r, 'utf-16', u"hi")
def test_invalid_utf8_encoded_body_with_valid_utf8_BOM(self):
r6 = self.response_class("http://www.example.com",
headers={"Content-type": ["text/html; charset=utf-8"]},
body="\xef\xbb\xbfWORD\xe3\xab")
self.assertEqual(r6.encoding, 'utf-8')
self.assertEqual(r6.body_as_unicode(), u'WORD\ufffd\ufffd')
def test_bom_is_removed_from_body(self):
        # Inferring the encoding from the body also caches the decoded body as a
        # side effect. This test tries to ensure that calling response.encoding
        # and response.body_as_unicode() in either order doesn't affect the
        # final values for the encoding and the decoded body.
url = 'http://example.com'
body = "\xef\xbb\xbfWORD"
headers = {"Content-type": ["text/html; charset=utf-8"]}
        # Test a response with no Content-Type header; the encoding comes from the BOM
response = self.response_class(url, body=body)
self.assertEqual(response.encoding, 'utf-8')
self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, body=body)
self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
        # The body-caching side effect isn't triggered when the encoding is
        # declared in the Content-Type header, but the BOM still needs to be
        # removed from the decoded body
response = self.response_class(url, headers=headers, body=body)
self.assertEqual(response.encoding, 'utf-8')
self.assertEqual(response.body_as_unicode(), u'WORD')
response = self.response_class(url, headers=headers, body=body)
self.assertEqual(response.body_as_unicode(), u'WORD')
self.assertEqual(response.encoding, 'utf-8')
def test_replace_wrong_encoding(self):
"""Test invalid chars are replaced properly"""
r = self.response_class("http://www.example.com", encoding='utf-8', body='PREFIX\xe3\xabSUFFIX')
        # XXX: The policy for replacing invalid chars may vary slightly,
        # but the result should always contain the unicode replacement char (u'\ufffd')
assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
assert u'PREFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
assert u'SUFFIX' in r.body_as_unicode(), repr(r.body_as_unicode())
# Do not destroy html tags due to encoding bugs
r = self.response_class("http://example.com", encoding='utf-8', \
body='\xf0<span>value</span>')
assert u'<span>value</span>' in r.body_as_unicode(), repr(r.body_as_unicode())
# FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse
#r = self.response_class("http://www.example.com", body='PREFIX\xe3\xabSUFFIX')
#assert u'\ufffd' in r.body_as_unicode(), repr(r.body_as_unicode())
def test_selector(self):
body = "<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
self.assertIsInstance(response.selector, Selector)
self.assertEqual(response.selector.type, 'html')
self.assertIs(response.selector, response.selector) # property is cached
self.assertIs(response.selector.response, response)
self.assertEqual(
response.selector.xpath("//title/text()").extract(),
[u'Some page']
)
self.assertEqual(
response.selector.css("title::text").extract(),
[u'Some page']
)
self.assertEqual(
response.selector.re("Some (.*)</title>"),
[u'page']
)
def test_selector_shortcuts(self):
body = "<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
self.assertEqual(
response.xpath("//title/text()").extract(),
response.selector.xpath("//title/text()").extract(),
)
self.assertEqual(
response.css("title::text").extract(),
response.selector.css("title::text").extract(),
)
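# The encoding tests above imply a precedence when TextResponse resolves its
# encoding: an encoding passed to the constructor wins over the Content-Type
# header, which in turn wins over whatever is inferred from the body (BOM,
# meta declaration or sniffing), and unknown declared encodings are skipped.
# The helper below is only a hypothetical sketch of that precedence for the
# reader; it is not the actual implementation and its name is made up.
def _encoding_precedence_sketch(constructor_encoding, headers_encoding, body_encoding):
    # Walk the candidates in priority order, skipping anything that
    # resolve_encoding() cannot recognise (e.g. the 'UKNOWN' charset above).
    for candidate in (constructor_encoding, headers_encoding, body_encoding):
        if candidate is not None and resolve_encoding(candidate) is not None:
            return resolve_encoding(candidate)
    return None
# e.g. _encoding_precedence_sketch(None, 'iso-8859-1', 'utf-8') == 'cp1252',
# mirroring the precedence asserted in the tests above.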
class HtmlResponseTest(TextResponseTest):
response_class = HtmlResponse
def test_html_encoding(self):
body = """<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body>Price: \xa3100</body></html>'
"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, 'iso-8859-1', body)
body = """<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
Price: \xa3100
"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, 'iso-8859-1', body)
# for conflicting declarations headers must take precedence
body = """<html><head><title>Some page</title><meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body>Price: \xa3100</body></html>'
"""
r3 = self.response_class("http://www.example.com", headers={"Content-type": ["text/html; charset=iso-8859-1"]}, body=body)
self._assert_response_values(r3, 'iso-8859-1', body)
# make sure replace() preserves the encoding of the original response
body = "New body \xa3"
r4 = r3.replace(body=body)
self._assert_response_values(r4, 'iso-8859-1', body)
def test_html5_meta_charset(self):
body = """<html><head><meta charset="gb2312" /><title>Some page</title><body>bla bla</body>"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, 'gb2312', body)
class XmlResponseTest(TextResponseTest):
response_class = XmlResponse
def test_xml_encoding(self):
body = "<xml></xml>"
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, self.response_class._DEFAULT_ENCODING, body)
body = """<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, 'iso-8859-1', body)
# make sure replace() preserves the explicit encoding passed in the constructor
body = """<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
r3 = self.response_class("http://www.example.com", body=body, encoding='utf-8')
body2 = "New body"
r4 = r3.replace(body=body2)
self._assert_response_values(r4, 'utf-8', body2)
def test_replace_encoding(self):
# make sure replace() keeps the previous encoding unless overridden explicitly
body = """<?xml version="1.0" encoding="iso-8859-1"?><xml></xml>"""
body2 = """<?xml version="1.0" encoding="utf-8"?><xml></xml>"""
r5 = self.response_class("http://www.example.com", body=body)
r6 = r5.replace(body=body2)
r7 = r5.replace(body=body2, encoding='utf-8')
self._assert_response_values(r5, 'iso-8859-1', body)
self._assert_response_values(r6, 'iso-8859-1', body2)
self._assert_response_values(r7, 'utf-8', body2)
def test_selector(self):
body = '<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
self.assertIsInstance(response.selector, Selector)
self.assertEqual(response.selector.type, 'xml')
self.assertIs(response.selector, response.selector) # property is cached
self.assertIs(response.selector.response, response)
self.assertEqual(
response.selector.xpath("//elem/text()").extract(),
[u'value']
)
def test_selector_shortcuts(self):
body = '<?xml version="1.0" encoding="utf-8"?><xml><elem>value</elem></xml>'
response = self.response_class("http://www.example.com", body=body)
self.assertEqual(
response.xpath("//elem/text()").extract(),
response.selector.xpath("//elem/text()").extract(),
)
if __name__ == "__main__":
unittest.main()
|
|
import unittest
class TestZmqpy(unittest.TestCase):
def test_import_zmqpy(self):
try:
import zmqpy
from zmqpy import Context, Socket
except ImportError as ie:
self.fail(ie.message)
class TestContext(unittest.TestCase):
def tearDown(self):
from zmqpy import Context
c = Context()
c.term()
def test_context_init(self):
from zmqpy import Context
c = Context()
cc = Context()
assert type(c) == Context
assert c.zmq_ctx
assert c.n_sockets == 0
assert c._sockets == {}
assert c.closed == False
assert c.iothreads == 1
assert id(c.__dict__) == id(cc.__dict__)
def test_context_term(self):
from zmqpy import Context
c = Context()
c.term()
assert c.closed
assert c.zmq_ctx == None
def test_context_socket(self):
from zmqpy import Context, PAIR
c = Context()
socket = c.socket(PAIR)
assert socket
assert c.n_sockets == 1
assert len(c._sockets) == 1
assert not c.closed
def test_context_socket_term(self):
from zmqpy import Context, PAIR
c = Context()
socket = c.socket(PAIR)
assert socket
assert c.n_sockets == 1
assert len(c._sockets) == 1
assert not c.closed
c.term()
assert c.n_sockets == 0
assert len(c._sockets) == 0
assert socket.closed
class TestSocket(unittest.TestCase):
def tearDown(self):
from zmqpy import Context
c = Context()
c.term()
def test_socket_bind(self):
from zmqpy import Context, PAIR
c = Context()
socket = c.socket(PAIR)
bind = socket.bind('tcp://*:3333')
assert bind == 0
socket.close()
def test_socket_connect(self):
from zmqpy import Context, PAIR
c = Context()
sender = c.socket(PAIR)
receiver = c.socket(PAIR)
bind = receiver.bind('tcp://*:3333')
assert bind == 0
connect = sender.connect('tcp://127.0.0.1:3333')
assert connect == 0
sender.close()
receiver.close()
def test_socket_disconnected_send(self):
from zmqpy import Context, PAIR, NOBLOCK
c = Context()
socket = c.socket(PAIR)
ret = socket.send("zmqpy test message", NOBLOCK)
assert ret == -1
assert socket.last_errno > 0
socket.close()
def test_socket_connected_send(self):
from zmqpy import Context, PAIR
from zmqpy._cffi import zmq_version
c = Context()
sender = c.socket(PAIR)
receiver = c.socket(PAIR)
bind = receiver.bind('tcp://*:3333')
        connect = sender.connect('tcp://127.0.0.1:3333')
message = "zmqpy test message"
ret = sender.send(message)
if zmq_version == 2:
assert ret == 0
else:
assert ret == len(message)
assert sender.last_errno == None
sender.close()
receiver.close()
def test_socket_connected_recv(self):
from zmqpy import Context, PAIR
from zmqpy._cffi import zmq_version
c = Context()
sender = c.socket(PAIR)
receiver = c.socket(PAIR)
bind = receiver.bind('tcp://*:3333')
connect = sender.connect('tcp://127.0.0.1:3333')
assert bind == 0
assert connect == 0
ret = sender.send("zmqpy test message")
if zmq_version == 2:
assert ret == 0
else:
assert ret == 18
assert sender.last_errno == None
import time
time.sleep(0.2)
message = receiver.recv()
assert sender.last_errno == None
assert message == "zmqpy test message"
sender.close()
receiver.close()
def test_socket_setsockopt_uint64(self):
from zmqpy._cffi import zmq_version
if zmq_version == 2:
from zmqpy import Context, PAIR, HWM
else:
from zmqpy import Context, PAIR, RCVHWM
HWM = RCVHWM
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(HWM, 10)
assert rc == 0
sender.close()
def test_socket_getsockopt_uint64(self):
from zmqpy._cffi import zmq_version
if zmq_version == 2:
from zmqpy import Context, PAIR, HWM
else:
from zmqpy import Context, PAIR, RCVHWM
HWM = RCVHWM
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(HWM, 10)
assert rc == 0
opt_value = sender.getsockopt(HWM)
assert opt_value != -1
assert opt_value == 10
sender.close()
def test_socket_setsockopt_binary(self):
from zmqpy import Context, PAIR, IDENTITY
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(IDENTITY, "id")
assert rc == 0
sender.close()
def test_socket_getsockopt_binary(self):
from zmqpy import Context, PAIR, IDENTITY
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(IDENTITY, "id")
assert rc == 0
opt_value = sender.getsockopt(IDENTITY, length=2)
assert opt_value != -1
assert opt_value == "id"
sender.close()
def test_socket_setsockopt_int64(self):
from zmqpy import Context, PAIR, RATE
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(RATE, 200)
assert rc == 0
sender.close()
def test_socket_getsockopt_int64(self):
from zmqpy import Context, PAIR, RATE
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(RATE, 200)
assert rc == 0
opt_value = sender.getsockopt(RATE)
assert opt_value != -1
assert opt_value == 200
sender.close()
def test_socket_setsockopt_int(self):
from zmqpy import Context, PAIR, LINGER
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(LINGER, 10)
assert rc == 0
sender.close()
def test_socket_getsockopt_int(self):
from zmqpy import Context, PAIR, LINGER
c = Context()
sender = c.socket(PAIR)
rc = sender.setsockopt(LINGER, 10)
assert rc == 0
opt_value = sender.getsockopt(LINGER)
assert opt_value != -1
assert opt_value == 10
sender.close()
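# A minimal end-to-end usage sketch assembled from the calls exercised above.
# It mirrors test_socket_connected_recv, is illustrative only, and is not part
# of the test suite; the port number is arbitrary for the example.
def _pair_roundtrip_example():
    import time
    from zmqpy import Context, PAIR
    c = Context()
    receiver = c.socket(PAIR)
    sender = c.socket(PAIR)
    receiver.bind('tcp://*:3334')
    sender.connect('tcp://127.0.0.1:3334')
    sender.send("zmqpy test message")
    time.sleep(0.2)  # give the message time to arrive, as the tests above do
    message = receiver.recv()
    sender.close()
    receiver.close()
    c.term()
    return message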
|