repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null)
---|---|---|---|---
BenFrantzDale/OpenRayTrace | refs/heads/master | OpenRayTrace/UI/frames/Aberrations.py | 1 | # OpenRayTrace.UI.Frames.Aberrations
## OpenRayTrace: Free optical design software
## Copyright (C) 2004 Andrew Wilson
##
## This file is part of OpenRayTrace.
##
## OpenRayTrace is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## OpenRayTrace is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with OpenRayTrace; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from OpenRayTrace.UI.myCanvas import *
from OpenRayTrace.ray_trace import *
import wx
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
class Aberrations(wx.MDIChildFrame):
wxID = wx.NewId()
#def _init_utils(self): pass
def _init_ctrls(self, prnt):
wx.MDIChildFrame.__init__(self, id=self.wxID,
name='Aberrations', parent=prnt, pos=wx.Point(399,218),
size=wx.Size(1200, 854), style=wx.DEFAULT_FRAME_STYLE,
title='Aberrations')
self.SetClientSize(wx.Size(1192, 820))
self.Center(wx.BOTH)
EVT_CLOSE(self, self.OnClose)
def __init__(self, parent):
self._init_ctrls(parent)
self.can = myCanvas(self)
self.Show()
self.can.glSetCurrent()
self.can.set_bg_color([0.0,0.0,0.0])
self.rays = 1000
self.glRayListStart = glGenLists(self.rays)
self.rayList = range(self.glRayListStart,self.glRayListStart+self.rays)
self.can.set_ray_list(self.rayList)
self.y_ave = 0.0
self.can.Rotateable(False)
def clear_list(self):
self.can.glSetCurrent()
glDeleteLists(self.glRayListStart,self.rays)
self.glRayListStart = glGenLists(self.rays)
self.rayList = range(self.glRayListStart,self.glRayListStart+self.rays)
self.can.set_ray_list(self.rayList)
self.can.DrawGL()
def calc_abr(self,t,n,c,t_cum,h,object_height):
self.clear_list()
angles = 20
Hp = [] # zeros(2*angles+1,Float)
tanUp = [] # zeros(2*angles+1,Float)
r = [] #zeros(2*angles+1,Float)
ray = self.glRayListStart
pos = np.array([0.0,0.7071,1.0])
pos = -object_height * pos
title = ['On Axis SPA',str(pos[1]) + 'mm SPA ', str(pos[2]) + 'mm SPA ']
cnt = 0
y_hit = np.array([i*h[1]/angles for i in range(-angles,angles+1)],dtype=float)
for i in range(len(pos)):
p = pos[i]
cnt += 1
ray += 1
y_launch = pos[i]
yy = y_hit - y_launch
den = (pow(yy*yy + t[0]*t[0],0.5))
Yi = yy / den
Xi = pow(1 - Yi*Yi,0.5)
for j in range(len(Yi)):
(x,y,z,X,Y,Z) = skew_ray((0.0,p,0.0),(Xi[j],Yi[j],0.0),t,n,c,t_cum,h)
# self.GetParent().ogl.draw_ray(x,y,z,j,t_cum,color = [0.0,1.0,1.0])
if(len(y) == len(t) + 1):
Hp.append(y[len(y)-1])
tanUp.append(tan(arcsin(Y[len(y)-1])))
r.append(y[1])
if(cnt == 1 and len(Hp) > 0):
mx = np.max(Hp)
#print Hp,tanUp
LA = []
for i in range(len(Hp)):
if(tanUp[i] != 0.0):
LA.append(-1 * np.array(Hp[i])/np.array(tanUp[i]))
else:
LA.append(0)
self.plotxy(LA,np.array(r),(2, 0),'LA',ray, color = (0.8,0.2,0.2))
ray += 1
##print r,Hp
if(len(Hp) > 0):
self.plotxy(y_hit,np.array(Hp),(0,0),title[cnt-1],ray,color = (0.2,0.8,0.2))
Hp = []
tanUp = []
r = []
self.can.DrawGL()
def plotxy(self,x,y, offset, title, ray, color):
self.can.glSetCurrent()
mxy = max(y)
mny = min(y)
mxx = max(x)
mnx = min(x)
rngx = mxx - mnx
rngy = mxy - mny
glNewList(ray, GL_COMPILE)
glTranslatef(offset[0],offset[1],0)
glColorf(*color)
x_norm = np.array(x)/rngx
y_norm = np.array(y)/rngy
mxy = np.max(y_norm)
mny = np.min(y_norm)
mxx = np.max(x_norm)
mnx = np.min(x_norm)
glBegin(GL_LINE_STRIP)
for i in range(len(y)):
glVertexf(x_norm[i],y_norm[i])
glEnd()
#draw Axes
glColorf(1.0,1.0,1.0,1.0)
glBegin(GL_LINES)
yc = 0.0 #(mxy+mny)/2.0
glVertexf(mnx,yc)
glVertexf(mxx,yc)
if(mny > 0):
glVertexf(0.0,0.0)
else:
glVertexf(0.0,mny)
glVertexf(0.0,mxy)
glEnd()
glColorf(1,1,1,1.0)
glRasterPos3f(x_norm[0] - .01,y_norm[0]-.01,0.0)
num = '( '+str(x[0])+' , '+str(y[0]) + ' )'
[glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12,ord(ii)) for ii in num]
glRasterPos3f(x_norm[len(x_norm)-1] - .01,y_norm[len(y_norm)-1]-.01,0.0)
num = '( '+str(x[len(x)-1])+' , '+str(y[len(y)-1]) + ' )'
[glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12,ord(ii)) for ii in num]
glRasterPos3f(x_norm[len(x_norm)-1] - .01,y_norm[len(y_norm)-1]-.05,0.0)
num = title
[glutBitmapCharacter(GLUT_BITMAP_HELVETICA_12,ord(ii)) for ii in num]
glTranslatef(-offset[0],-offset[1],0)
glFlush()
glEndList()
def OnClose(self, event): self.Hide()
|
tomlanyon/dnspython | refs/heads/master | dns/rdtypes/ANY/DLV.py | 18 | # Copyright (C) 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.dsbase
class DLV(dns.rdtypes.dsbase.DSBase):
"""DLV record"""
|
ingokegel/intellij-community | refs/heads/master | python/helpers/tests/generator3_tests/data/StatePassingGeneration/existing_updated_due_to_modified_binary/after/sdk_skeletons/mod2.py | 84 | # encoding: utf-8
# module mod2
# from mod2.so
# by generator 0.2
# no doc
# no imports
# no functions
# no classes
|
Rav3nPL/doubloons-0.10 | refs/heads/master-0.10 | qa/rpc-tests/smartfees.py | 129 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class EstimateFeeTest(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir,
["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
# Node1 mines small-but-not-tiny blocks, and allows free transactions.
# NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
# so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
# 6 or 7 transactions)
self.nodes.append(start_node(1, self.options.tmpdir,
["-blockprioritysize=1500", "-blockmaxsize=2000",
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
connect_nodes(self.nodes[1], 0)
# Node2 is a stingy miner that
# produces very small blocks (room for only 3 or so transactions)
node2args = [ "-blockprioritysize=0", "-blockmaxsize=1500",
"-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]
self.nodes.append(start_node(2, self.options.tmpdir, node2args))
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
# Prime the memory pool with pairs of transactions
# (high-priority, random fee and zero-priority, random fee)
min_fee = Decimal("0.001")
fees_per_kb = []
for i in range(12):
(txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
min_fee, min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
# Mine blocks with node2 until the memory pool clears:
count_start = self.nodes[2].getblockcount()
while len(self.nodes[2].getrawmempool()) > 0:
self.nodes[2].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, super-stingy miner: "+str([str(e) for e in all_estimates]))
# Estimates should be within the bounds of what transactions fees actually were:
delta = 1.0e-6 # account for rounding error
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Generate transactions while mining 30 more blocks, this time with node1:
for i in range(30):
for j in range(random.randrange(6-4,6+4)):
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
Decimal("0.0"), min_fee, 20)
tx_kbytes = (len(txhex)/2)/1000.0
fees_per_kb.append(float(fee)/tx_kbytes)
self.nodes[1].setgenerate(True, 1)
self.sync_all()
all_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Fee estimates, more generous miner: "+str([ str(e) for e in all_estimates]))
for e in filter(lambda x: x >= 0, all_estimates):
if float(e)+delta < min(fees_per_kb) or float(e)-delta > max(fees_per_kb):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"%(float(e), min_fee_kb, max_fee_kb))
# Finish by mining a normal-sized block:
while len(self.nodes[0].getrawmempool()) > 0:
self.nodes[0].setgenerate(True, 1)
self.sync_all()
final_estimates = [ self.nodes[0].estimatefee(i) for i in range(1,20) ]
print("Final fee estimates: "+str([ str(e) for e in final_estimates]))
if __name__ == '__main__':
EstimateFeeTest().main()
|
Crach1015/plugin.video.superpack | refs/heads/master | zip/plugin.video.SportsDevil/lib/dialogs/dialogQuestion.py | 25 | # -*- coding: utf-8 -*-
import xbmcgui
class DialogQuestion:
def __init__(self):
self.dlg = xbmcgui.Dialog()
self.head = 'SportsDevil Question'
def ask(self, question):
return self.dlg.yesno(self.head, question)
def close(self):
self.dlg.close()
|
neoscoin/neos-core | refs/heads/master | src/ledger/lib/python2.7/site-packages/pip/_vendor/chardet/version.py | 5 | """
This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages.
:author: Dan Blanchard ([email protected])
"""
__version__ = "3.0.4"
VERSION = __version__.split('.')
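# A minimal usage sketch (illustrative only, not part of the original module;
# assumes the package is importable as `chardet`): splitting "3.0.4" on '.'
# yields the version components, so
#   chardet.version.__version__  -> "3.0.4"
#   chardet.version.VERSION      -> ['3', '0', '4']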
|
AlexanderFabisch/scikit-learn | refs/heads/master | sklearn/ensemble/tests/test_forest.py | 26 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_less(0 < X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
|
axinging/chromium-crosswalk | refs/heads/master | net/data/verify_certificate_chain_unittest/generate-target-unknown-critical-extension.py | 16 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary and a trusted root. The target
certificate has an unknown X.509v3 extension (OID=1.2.3.4) that is marked as
critical. Verifying this certificate chain is expected to fail because there is
an unrecognized critical extension."""
import common
# Self-signed root certificate (part of trust store).
root = common.create_self_signed_root_certificate('Root')
# Intermediary certificate.
intermediary = common.create_intermediary_certificate('Intermediary', root)
# Target certificate (has unknown critical extension).
target = common.create_end_entity_certificate('Target', intermediary)
target.get_extensions().add_property('1.2.3.4',
'critical,DER:01:02:03:04')
chain = [target, intermediary]
trusted = [root]
time = common.DEFAULT_TIME
verify_result = False
common.write_test_file(__doc__, chain, trusted, time, verify_result)
|
manazhao/tf_recsys | refs/heads/r1.0 | tensorflow/contrib/sparsemax/__init__.py | 106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that implements sparsemax and sparsemax loss, see [1].
[1] https://arxiv.org/abs/1602.02068
## Sparsemax
@@sparsemax
@@sparsemax_loss
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.sparsemax.python.ops.sparsemax import sparsemax
from tensorflow.contrib.sparsemax.python.ops.sparsemax_loss \
import sparsemax_loss
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['sparsemax', 'sparsemax_loss']
remove_undocumented(__name__, _allowed_symbols)
|
lucalianas/openmicroscopy | refs/heads/develop | components/tools/OmeroPy/test/integration/gatewaytest/test_pixels.py | 10 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gateway tests - Testing the gateway image wrapper.getPrimaryPixels() and
the pixels wrapper
Copyright 2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
pytest fixtures used as defined in conftest.py:
- gatewaywrapper
- author_testimg_generated
"""
import omero
import pytest
class TestPixels (object):
@pytest.fixture(autouse=True)
def setUp(self, author_testimg):
self.image = author_testimg
def testReuseRawPixelsStore(self, gatewaywrapper,
author_testimg_generated):
img1 = self.image
img2 = author_testimg_generated
rps = gatewaywrapper.gateway.createRawPixelsStore()
rps.setPixelsId(img1.getPrimaryPixels().getId(), True,
{'omero.group': '-1'})
assert rps.getByteWidth() > 0
rps.setPixelsId(img2.getPrimaryPixels().getId(), True,
{'omero.group': '-1'})
assert rps.getByteWidth() > 0
def testPlaneInfo(self):
image = self.image
pixels = image.getPrimaryPixels()
assert pixels.OMERO_CLASS == 'Pixels'
assert pixels._obj.__class__ == omero.model.PixelsI
sizeZ = image.getSizeZ()
sizeC = image.getSizeC()
sizeT = image.getSizeT()
planeInfo = list(pixels.copyPlaneInfo())
assert len(planeInfo) == sizeZ*sizeC*sizeT
# filter by 1 or more dimension
planeInfo = list(pixels.copyPlaneInfo(theC=0))
for p in planeInfo:
assert p.theC == 0
planeInfo = list(pixels.copyPlaneInfo(theZ=1, theT=0))
for p in planeInfo:
assert p.theZ == 1
assert p.theT == 0
def testPixelsType(self):
image = self.image
pixels = image.getPrimaryPixels()
pixelsType = pixels.getPixelsType()
assert pixelsType.value == 'int16'
assert pixelsType.bitSize == 16
def testGetTile(self, gatewaywrapper):
image = self.image
pixels = image.getPrimaryPixels()
sizeX = image.getSizeX()
zctTileList = []
tile = (50, 100, 10, 20)
for z in range(2):
for c in range(1):
for t in range(1):
zctTileList.append((z, c, t, tile))
lastTile = None
tiles = pixels.getTiles(zctTileList) # get a tile from every plane
for tile in tiles:
tile
lastT = None
for zctTile in zctTileList:
z, c, t, Tile = zctTile
tile = pixels.getTile(z, c, t, Tile)
assert lastTile == lastT
# try stacking tiles together - check it's the same as getting the
# same region as 1 tile
z, c, t = 0, 0, 0
tile1 = pixels.getTile(z, c, t, (0, 0, 5, 3))
tile2 = pixels.getTile(z, c, t, (5, 0, 5, 3))
# should be same as tile1 and tile2 combined
tile3 = pixels.getTile(z, c, t, (0, 0, 10, 3))
from numpy import hstack
stacked = hstack((tile1, tile2))
# bit of a hacked way to compare arrays, but seems to work
assert str(tile3) == str(stacked)
# See whether a the first row and a tile of the first row
# are equal (without using gateway)
rfs = gatewaywrapper.gateway.createRawPixelsStore()
try:
rfs.setPixelsId(pixels.id, False)
directRow = rfs.getRow(0, 0, 0, 0)
directTile = rfs.getTile(0, 0, 0, 0, 0, sizeX, 1)
assert directRow == directTile
finally:
rfs.close()
# See whether a 2x2 tile is the same as the same region of a Plane.
# See #11315
testTile = pixels.getTile(0, 0, 0, tile=(0, 0, 2, 2))
croppedPlane = pixels.getPlane(0, 0, 0)[0:2, 0:2]
assert str(testTile) == str(croppedPlane), \
"Tile and croppedPlane not equal"
def testGetPlane(self):
image = self.image
pixels = image.getPrimaryPixels()
sizeZ = image.getSizeZ()
sizeC = image.getSizeC()
sizeT = image.getSizeT()
zctList = []
for z in range(sizeZ):
for c in range(sizeC):
for t in range(sizeT):
zctList.append((z, c, t))
# timing commented out below - typical times:
# get 70 planes, using getPlanes() t1 = 3.99837493896 secs, getPlane()
# t2 = 5.9151828289 secs. t1/t2 = 0.7
# get 210 planes, using getPlanes() t1 = 12.3150248528 secs,
# getPlane() t2 = 17.2735779285 secs t1/t2 = 0.7
# test getPlanes()
# import time
# startTime = time.time()
planes = pixels.getPlanes(zctList) # get all planes
for plane in planes:
plane
# t1 = time.time() - startTime
# print "Getplanes = %s secs" % t1
# test getPlane() which returns a single plane
# startTime = time.time()
for zct in zctList:
z, c, t = zct
pixels.getPlane(z, c, t)
# t2 = time.time() - startTime
# print "Get individual planes = %s secs" % t2
# print "t1/t2", t1/t2
pixels.getPlane(sizeZ-1, sizeC-1, sizeT-1)
plane = pixels.getPlane() # default is (0,0,0)
firstPlane = pixels.getPlane(0, 0, 0)
assert plane[0][0] == firstPlane[0][0]
def testGetPlanesExceptionOnGetPlane(self):
"""
Tests exception handling in the gateway.getPlanes generator.
See #5156
"""
image = self.image
pixels = image.getPrimaryPixels()
# Replace service creation with a mock
pixels._prepareRawPixelsStore = lambda: MockRawPixelsStore(pixels)
# Now, when we call, the first yield should succeed, the second should
# fail
found = 0
try:
for x in pixels.getPlanes(((0, 0, 0), (1, 1, 1))):
found += 1
raise AssertionError("Should throw")
except AssertionError:
raise
except Exception, e:
assert not e.close
assert found == 1
def testGetPlanesExceptionOnClose(self):
"""
Tests exception handling in the gateway.getPlanes generator.
See #5156
"""
image = self.image
pixels = image.getPrimaryPixels()
# Replace service creation with a mock
pixels._prepareRawPixelsStore = lambda: MockRawPixelsStore(
pixels, good_calls=2, close_fails=True)
# Now, when we call, the first yield should succeed, the second should
# fail
found = 0
try:
for x in pixels.getPlanes(((0, 0, 0), (1, 1, 1))):
found += 1
raise AssertionError("Should have failed on close")
except AssertionError:
raise
except Exception, e:
assert e.close
assert found == 2
def testGetPlanesExceptionOnBoth(self):
"""
Tests exception handling in the gateway.getPlanes generator.
In this test, both the getPlane and the close throw an exception.
The exception from the getPlane method should be thrown, and the close
logged (not tested here)
See #5156
"""
image = self.image
pixels = image.getPrimaryPixels()
# Replace service creation with a mock
pixels._prepareRawPixelsStore = lambda: MockRawPixelsStore(
pixels, good_calls=1, close_fails=True)
# Now, when we call, the first yield should succeed, the second should
# fail
found = 0
try:
for x in pixels.getPlanes(((0, 0, 0), (1, 1, 1))):
found += 1
raise AssertionError("Should have failed on getPlane and close")
except AssertionError:
raise
except Exception, e:
assert not e.close
assert found == 1
class MockRawPixelsStore(object):
"""
Mock which throws exceptions at given times.
"""
def __init__(self, pixels, good_calls=1, close_fails=False):
self.pixels = pixels
self.good_calls = good_calls
self.close_fails = close_fails
def getPlane(self, *args):
if self.good_calls == 0:
e = Exception("MOCK EXCEPTION")
e.close = False
raise e
else:
self.good_calls -= 1
return "0"*(2*self.pixels.getSizeX()*self.pixels.getSizeY())
def close(self, *args):
if self.close_fails:
e = Exception("MOCK CLOSE EXCEPTION")
e.close = True
raise e
|
lulandco/SickRage | refs/heads/develop | lib/tvdb_api/tvdb_api.py | 8 | #!/usr/bin/env python2
# encoding:utf-8
# author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
from functools import wraps
import traceback
__author__ = "dbr/Ben"
__version__ = "1.9"
import os
import re
import time
import getpass
import StringIO
import tempfile
import warnings
import logging
import zipfile
import datetime as dt
import requests
import xmltodict
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
try:
import gzip
except ImportError:
gzip = None
from dateutil.parser import parse
from cachecontrol import CacheControl, caches
from tvdb_ui import BaseUI, ConsoleUI
from tvdb_exceptions import (tvdb_error, tvdb_userabort, tvdb_shownotfound, tvdb_showincomplete,
tvdb_seasonnotfound, tvdb_episodenotfound, tvdb_attributenotfound)
def log():
return logging.getLogger("tvdb_api")
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
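# Illustrative usage of the retry decorator above (not part of the original
# module): wrap a flaky call so that tvdb_error is retried with exponential
# backoff, e.g.
#
#     @retry(tvdb_error, tries=3, delay=1, backoff=2)
#     def fetch_series():
#         ...  # any call that may raise tvdb_error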
class ShowContainer(dict):
"""Simple dict that holds a series of Show instances
"""
def __init__(self):
self._stack = []
self._lastgc = time.time()
def __setitem__(self, key, value):
self._stack.append(key)
        # prune the cache at most every 20 seconds, keeping only the
        # 100 most recent entries
if time.time() - self._lastgc > 20:
for o in self._stack[:-100]:
del self[o]
self._stack = self._stack[-100:]
self._lastgc = time.time()
super(ShowContainer, self).__setitem__(key, value)
class Show(dict):
"""Holds a dict of seasons, and show data.
"""
def __init__(self):
dict.__init__(self)
self.data = {}
def __repr__(self):
return "<Show %s (containing %s seasons)>" % (
self.data.get(u'seriesname', 'instance'),
len(self)
)
def __getattr__(self, key):
if key in self:
# Key is an episode, return it
return self[key]
if key in self.data:
# Non-numeric request is for show-data
return self.data[key]
raise AttributeError
def __getitem__(self, key):
if key in self:
# Key is an episode, return it
return dict.__getitem__(self, key)
if key in self.data:
# Non-numeric request is for show-data
return dict.__getitem__(self.data, key)
# Data wasn't found, raise appropriate error
if isinstance(key, int) or key.isdigit():
# Episode number x was not found
raise tvdb_seasonnotfound("Could not find season %s" % (repr(key)))
else:
# If it's not numeric, it must be an attribute name, which
# doesn't exist, so attribute error.
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def airedOn(self, date):
ret = self.search(str(date), 'firstaired')
if len(ret) == 0:
raise tvdb_episodenotfound("Could not find any episodes that aired on %s" % date)
return ret
def search(self, term=None, key=None):
"""
Search all episodes in show. Can search all data, or a specific key (for
example, episodename)
Always returns an array (can be empty). First index contains the first
match, and so on.
Each array index is an Episode() instance, so doing
search_results[0]['episodename'] will retrieve the episode name of the
first match.
Search terms are converted to lower case (unicode) strings.
# Examples
These examples assume t is an instance of Tvdb():
>>> t = Tvdb()
>>>
To search for all episodes of Scrubs with a bit of data
containing "my first day":
>>> t['Scrubs'].search("my first day")
[<Episode 01x01 - My First Day>]
>>>
Search for "My Name Is Earl" episode named "Faked His Own Death":
>>> t['My Name Is Earl'].search('Faked His Own Death', key = 'episodename')
[<Episode 01x04 - Faked His Own Death>]
>>>
To search Scrubs for all episodes with "mentor" in the episode name:
>>> t['scrubs'].search('mentor', key = 'episodename')
[<Episode 01x02 - My Mentor>, <Episode 03x15 - My Tormented Mentor>]
>>>
# Using search results
>>> results = t['Scrubs'].search("my first")
>>> print results[0]['episodename']
My First Day
>>> for x in results: print x['episodename']
My First Day
My First Step
My First Kill
>>>
"""
results = []
for cur_season in self.values():
searchresult = cur_season.search(term=term, key=key)
if len(searchresult) != 0:
results.extend(searchresult)
return results
class Season(dict):
def __init__(self, show=None):
"""The show attribute points to the parent show
"""
self.show = show
def __repr__(self):
return "<Season instance (containing %s episodes)>" % (
len(self.keys())
)
def __getattr__(self, episode_number):
if episode_number in self:
return self[episode_number]
raise AttributeError
def __getitem__(self, episode_number):
if episode_number not in self:
raise tvdb_episodenotfound("Could not find episode %s" % (repr(episode_number)))
else:
return dict.__getitem__(self, episode_number)
def search(self, term=None, key=None):
"""Search all episodes in season, returns a list of matching Episode
instances.
>>> t = Tvdb()
>>> t['scrubs'][1].search('first day')
[<Episode 01x01 - My First Day>]
>>>
See Show.search documentation for further information on search
"""
results = []
for ep in self.values():
searchresult = ep.search(term=term, key=key)
if searchresult is not None:
results.append(
searchresult
)
return results
class Episode(dict):
def __init__(self, season=None):
"""The season attribute points to the parent season
"""
self.season = season
def __repr__(self):
seasno = int(self.get(u'seasonnumber', 0))
epno = int(self.get(u'episodenumber', 0))
epname = self.get(u'episodename')
if epname is not None:
return "<Episode %02dx%02d - %s>" % (seasno, epno, epname)
else:
return "<Episode %02dx%02d>" % (seasno, epno)
def __getattr__(self, key):
if key in self:
return self[key]
raise AttributeError
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise tvdb_attributenotfound("Cannot find attribute %s" % (repr(key)))
def search(self, term=None, key=None):
"""Search episode data for term, if it matches, return the Episode (self).
The key parameter can be used to limit the search to a specific element,
for example, episodename.
        This is primarily for use by Show.search and Season.search. See
Show.search for further information on search
Simple example:
>>> e = Episode()
>>> e['episodename'] = "An Example"
>>> e.search("examp")
<Episode 00x00 - An Example>
>>>
Limiting by key:
>>> e.search("examp", key = "episodename")
<Episode 00x00 - An Example>
>>>
"""
if term == None:
raise TypeError("must supply string to search for (contents)")
term = unicode(term).lower()
for cur_key, cur_value in self.items():
cur_key, cur_value = unicode(cur_key).lower(), unicode(cur_value).lower()
if key is not None and cur_key != key:
# Do not search this key
continue
if cur_value.find(unicode(term).lower()) > -1:
return self
class Actors(list):
"""Holds all Actor instances for a show
"""
pass
class Actor(dict):
"""Represents a single actor. Should contain..
id,
image,
name,
role,
sortorder
"""
def __repr__(self):
return "<Actor \"%s\">" % (self.get("name"))
class Tvdb:
"""Create easy-to-use interface to name of season/episode name
>>> t = Tvdb()
>>> t['Scrubs'][1][24]['episodename']
u'My Last Day'
"""
def __init__(self,
interactive=False,
select_first=False,
debug=False,
cache=True,
banners=False,
actors=False,
custom_ui=None,
language=None,
search_all_languages=False,
apikey=None,
forceConnect=False,
useZip=False,
dvdorder=False,
proxy=None):
"""interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
When False, the first search result is used.
select_first (True/False):
Automatically selects the first series search result (rather
than showing the user a list of more than one series).
Is overridden by interactive = False, or specifying a custom_ui
debug (True/False) DEPRECATED:
Replaced with proper use of logging module. To show debug messages:
>>> import logging
>>> logging.basicConfig(level = logging.DEBUG)
cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML is persisted to disc. If True, stores in a
            tvdb_api folder under your system's TEMP_DIR, if set to a
str/unicode instance it will use this as the cache
location. If False, disables caching. Can also be passed
an arbitrary Python object, which is used as a urllib2
opener, which should be created by urllib2.build_opener
banners (True/False):
Retrieves the banners for a show. These are accessed
via the _banners key of a Show(), for example:
>>> Tvdb(banners=True)['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
actors (True/False):
Retrieves a list of the actors for a show. These are accessed
via the _actors key of a Show(), for example:
>>> t = Tvdb(actors=True)
>>> t['scrubs']['_actors'][0]['name']
u'Zach Braff'
custom_ui (tvdb_ui.BaseUI subclass):
A callable subclass of tvdb_ui.BaseUI (overrides interactive option)
language (2 character language abbreviation):
The language of the returned data. Is also the language search
uses. Default is "en" (English). For full list, run..
>>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
['da', 'fi', 'nl', ...]
search_all_languages (True/False):
By default, Tvdb will only search in the language specified using
the language option. When this is True, it will search for the
            show in any language
apikey (str/unicode):
Override the default thetvdb.com API key. By default it will use
tvdb_api's own key (fine for small scripts), but you can use your
own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
See http://thetvdb.com/?tab=apiregister to get your own key
forceConnect (bool):
If true it will always try to connect to theTVDB.com even if we
recently timed out. By default it will wait one minute before
trying again, and any requests within that one minute window will
return an exception immediately.
useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            Only the main-language xml is used; the actor and banner xml are lost.
"""
self.shows = ShowContainer() # Holds all Show classes
self.corrections = {} # Holds show-name to show_id mapping
self.config = {}
if apikey is not None:
self.config['apikey'] = apikey
else:
self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key
self.config['debug_enabled'] = debug # show debugging messages
self.config['custom_ui'] = custom_ui
self.config['interactive'] = interactive # prompt for correct series?
self.config['select_first'] = select_first
self.config['search_all_languages'] = search_all_languages
self.config['useZip'] = useZip
self.config['dvdorder'] = dvdorder
self.config['proxy'] = proxy
if cache is True:
self.config['cache_enabled'] = True
self.config['cache_location'] = self._getTempDir()
elif cache is False:
self.config['cache_enabled'] = False
elif isinstance(cache, basestring):
self.config['cache_enabled'] = True
self.config['cache_location'] = cache
else:
raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))
self.config['session'] = requests.Session()
self.config['banners_enabled'] = banners
self.config['actors_enabled'] = actors
if self.config['debug_enabled']:
warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
"To enable debug messages, use the following code before importing: "
"import logging; logging.basicConfig(level=logging.DEBUG)")
logging.basicConfig(level=logging.DEBUG)
# List of language from http://thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
# recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
self.config['valid_languages'] = [
"da", "fi", "nl", "de", "it", "es", "fr", "pl", "hu", "el", "tr",
"ru", "he", "ja", "pt", "zh", "cs", "sl", "hr", "ko", "en", "sv", "no"
]
# thetvdb.com should be based around numeric language codes,
# but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
# requires the language ID, thus this mapping is required (mainly
# for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}
if language is None:
self.config['language'] = 'en'
else:
if language not in self.config['valid_languages']:
raise ValueError("Invalid language %s, options are: %s" % (
language, self.config['valid_languages']
))
else:
self.config['language'] = language
# The following url_ configs are based of the
# http://thetvdb.com/wiki/index.php/Programmers_API
self.config['base_url'] = "http://thetvdb.com"
if self.config['search_all_languages']:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": "all"}
else:
self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php" % self.config
self.config['params_getSeries'] = {"seriesname": "", "language": self.config['language']}
self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config
self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config
self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config
self.config['url_updates_all'] = u"%(base_url)s/api/%(apikey)s/updates_all.zip" % self.config
self.config['url_updates_month'] = u"%(base_url)s/api/%(apikey)s/updates_month.zip" % self.config
self.config['url_updates_week'] = u"%(base_url)s/api/%(apikey)s/updates_week.zip" % self.config
self.config['url_updates_day'] = u"%(base_url)s/api/%(apikey)s/updates_day.zip" % self.config
def _getTempDir(self):
"""Returns the [system temp dir]/tvdb_api-u501 (or
tvdb_api-myuser)
"""
if hasattr(os, 'getuid'):
uid = "u%d" % (os.getuid())
else:
# For Windows
try:
uid = getpass.getuser()
except ImportError:
return os.path.join(tempfile.gettempdir(), "tvdb_api")
return os.path.join(tempfile.gettempdir(), "tvdb_api-%s" % (uid))
@retry(tvdb_error)
def _loadUrl(self, url, params=None, language=None):
try:
log().debug("Retrieving URL %s" % url)
# get response from TVDB
if self.config['cache_enabled']:
                # Let's try without caching sessions to disk for a while
# session = CacheControl(sess=self.config['session'], cache=caches.FileCache(self.config['cache_location'], use_dir_lock=True), cache_etags=False)
session = self.config['session']
if self.config['proxy']:
log().debug("Using proxy for URL: %s" % url)
session.proxies = {
"http": self.config['proxy'],
"https": self.config['proxy'],
}
resp = session.get(url.strip(), params=params)
else:
resp = requests.get(url.strip(), params=params)
resp.raise_for_status()
except requests.exceptions.HTTPError, e:
raise tvdb_error("HTTP error " + str(e.errno) + " while loading URL " + str(url))
except requests.exceptions.ConnectionError, e:
raise tvdb_error("Connection error " + str(e.message) + " while loading URL " + str(url))
except requests.exceptions.Timeout, e:
raise tvdb_error("Connection timed out " + str(e.message) + " while loading URL " + str(url))
except Exception as e:
raise tvdb_error("Unknown exception while loading URL " + url + ": " + repr(e))
def process(path, key, value):
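            # xmltodict postprocessor: tag names are lower-cased and
            # 'firstaired' values are normalised to YYYY-MM-DD (the
            # 0000-00-00 placeholder is replaced with a parseable minimal date)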
key = key.lower()
# clean up value and do type changes
if value:
try:
if key == 'firstaired' and value in "0000-00-00":
new_value = str(dt.date.fromordinal(1))
new_value = re.sub("([-]0{2}){1,}", "", new_value)
fixDate = parse(new_value, fuzzy=True).date()
value = fixDate.strftime("%Y-%m-%d")
elif key == 'firstaired':
value = parse(value, fuzzy=True).date()
value = value.strftime("%Y-%m-%d")
#if key == 'airs_time':
# value = parse(value).time()
# value = value.strftime("%I:%M %p")
except:
pass
return key, value
if 'application/zip' in resp.headers.get("Content-Type", ''):
try:
log().debug("We recived a zip file unpacking now ...")
zipdata = StringIO.StringIO()
zipdata.write(resp.content)
myzipfile = zipfile.ZipFile(zipdata)
return xmltodict.parse(myzipfile.read('%s.xml' % language), postprocessor=process)
except zipfile.BadZipfile:
raise tvdb_error("Bad zip file received from thetvdb.com, could not read it")
else:
try:
return xmltodict.parse(resp.content.decode('utf-8'), postprocessor=process)
except:
return dict([(u'data', None)])
def _getetsrc(self, url, params=None, language=None):
"""Loads a URL using caching, returns an ElementTree of the source
"""
try:
return self._loadUrl(url, params=params, language=language).values()[0]
except Exception, e:
raise tvdb_error(e)
def _setItem(self, sid, seas, ep, attrib, value):
"""Creates a new episode, creating Show(), Season() and
Episode()s as required. Called by _getShowData to populate show
        Since the nice-to-use tvdb[1][24]['name'] interface
        makes it impossible to do tvdb[1][24]['name'] = "name"
and still be capable of checking if an episode exists
so we can raise tvdb_shownotfound, we have a slightly
less pretty method of setting items.. but since the API
is supposed to be read-only, this is the best way to
do it!
The problem is that calling tvdb[1][24]['episodename'] = "name"
calls __getitem__ on tvdb[1], there is no way to check if
tvdb.__dict__ should have a key "1" before we auto-create it
"""
if sid not in self.shows:
self.shows[sid] = Show()
if seas not in self.shows[sid]:
self.shows[sid][seas] = Season(show=self.shows[sid])
if ep not in self.shows[sid][seas]:
self.shows[sid][seas][ep] = Episode(season=self.shows[sid][seas])
self.shows[sid][seas][ep][attrib] = value
def _setShowData(self, sid, key, value):
"""Sets self.shows[sid] to a new Show instance, or sets the data
"""
if sid not in self.shows:
self.shows[sid] = Show()
self.shows[sid].data[key] = value
def _cleanData(self, data):
"""Cleans up strings returned by TheTVDB.com
Issues corrected:
        - Replaces &amp; with &
        - Trailing whitespace
        """
        data = unicode(data).replace(u"&amp;", u"&")
data = data.strip()
return data
def search(self, series):
"""This searches TheTVDB.com for the series name
and returns the result list
"""
series = series.encode("utf-8")
log().debug("Searching for show %s" % series)
self.config['params_getSeries']['seriesname'] = series
results = self._getetsrc(self.config['url_getSeries'], self.config['params_getSeries'])
if not results:
return
return results.values()[0]
def _getSeries(self, series):
"""This searches TheTVDB.com for the series name,
If a custom_ui UI is configured, it uses this to select the correct
series. If not, and interactive == True, ConsoleUI is used, if not
BaseUI is used to select the first result.
"""
allSeries = self.search(series)
if not allSeries:
log().debug('Series result returned zero')
raise tvdb_shownotfound("Show search returned zero results (cannot find show on TVDB)")
if not isinstance(allSeries, list):
allSeries = [allSeries]
if self.config['custom_ui'] is not None:
log().debug("Using custom UI %s" % (repr(self.config['custom_ui'])))
CustomUI = self.config['custom_ui']
ui = CustomUI(config=self.config)
else:
if not self.config['interactive']:
log().debug('Auto-selecting first search result using BaseUI')
ui = BaseUI(config=self.config)
else:
log().debug('Interactively selecting show using ConsoleUI')
ui = ConsoleUI(config=self.config)
return ui.selectSeries(allSeries)
def _parseBanners(self, sid):
"""Parses banners XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml
        Banners are retrieved using t['show name']['_banners'], for example:
>>> t = Tvdb(banners = True)
>>> t['scrubs']['_banners'].keys()
['fanart', 'poster', 'series', 'season']
>>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath']
u'http://thetvdb.com/banners/posters/76156-2.jpg'
>>>
Any key starting with an underscore has been processed (not the raw
data from the XML)
This interface will be improved in future versions.
"""
log().debug('Getting season banners for %s' % (sid))
bannersEt = self._getetsrc(self.config['url_seriesBanner'] % (sid))
if not bannersEt:
log().debug('Banners result returned zero')
return
banners = {}
for cur_banner in bannersEt['banner'] if isinstance(bannersEt['banner'], list) else [bannersEt['banner']]:
bid = cur_banner['id']
btype = cur_banner['bannertype']
btype2 = cur_banner['bannertype2']
if btype is None or btype2 is None:
continue
if not btype in banners:
banners[btype] = {}
if not btype2 in banners[btype]:
banners[btype][btype2] = {}
if not bid in banners[btype][btype2]:
banners[btype][btype2][bid] = {}
for k, v in cur_banner.items():
if k is None or v is None:
continue
k, v = k.lower(), v.lower()
banners[btype][btype2][bid][k] = v
for k, v in banners[btype][btype2][bid].items():
if k.endswith("path"):
new_key = "_%s" % (k)
log().debug("Transforming %s to %s" % (k, new_key))
new_url = self.config['url_artworkPrefix'] % (v)
banners[btype][btype2][bid][new_key] = new_url
self._setShowData(sid, "_banners", banners)
def _parseActors(self, sid):
"""Parsers actors XML, from
http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/actors.xml
Actors are retrieved using t['show name]['_actors'], for example:
>>> t = Tvdb(actors = True)
>>> actors = t['scrubs']['_actors']
>>> type(actors)
<class 'tvdb_api.Actors'>
>>> type(actors[0])
<class 'tvdb_api.Actor'>
>>> actors[0]
<Actor "Zach Braff">
>>> sorted(actors[0].keys())
['id', 'image', 'name', 'role', 'sortorder']
>>> actors[0]['name']
u'Zach Braff'
>>> actors[0]['image']
u'http://thetvdb.com/banners/actors/43640.jpg'
Any key starting with an underscore has been processed (not the raw
data from the XML)
"""
log().debug("Getting actors for %s" % (sid))
actorsEt = self._getetsrc(self.config['url_actorsInfo'] % (sid))
if not actorsEt:
log().debug('Actors result returned zero')
return
cur_actors = Actors()
for cur_actor in actorsEt['actor'] if isinstance(actorsEt['actor'], list) else [actorsEt['actor']]:
curActor = Actor()
for k, v in cur_actor.items():
if k is None or v is None:
continue
k = k.lower()
if k == "image":
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
curActor[k] = v
cur_actors.append(curActor)
self._setShowData(sid, '_actors', cur_actors)
def _getShowData(self, sid, language, getEpInfo=False):
"""Takes a series ID, gets the epInfo URL and parses the TVDB
XML file into the shows dict in layout:
shows[series_id][season_number][episode_number]
"""
if self.config['language'] is None:
log().debug('Config language is none, using show language')
if language is None:
raise tvdb_error("config['language'] was None, this should not happen")
getShowInLanguage = language
else:
log().debug(
                'Configured language %s overrides show language of %s' % (
self.config['language'],
language
)
)
getShowInLanguage = self.config['language']
# Parse show information
log().debug('Getting all series data for %s' % (sid))
seriesInfoEt = self._getetsrc(
self.config['url_seriesInfo'] % (sid, getShowInLanguage)
)
if not seriesInfoEt:
log().debug('Series result returned zero')
raise tvdb_error("Series result returned zero")
# get series data
for k, v in seriesInfoEt['series'].items():
if v is not None:
if k in ['banner', 'fanart', 'poster']:
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setShowData(sid, k, v)
# get episode data
if getEpInfo:
# Parse banners
if self.config['banners_enabled']:
self._parseBanners(sid)
# Parse actors
if self.config['actors_enabled']:
self._parseActors(sid)
# Parse episode data
log().debug('Getting all episodes of %s' % (sid))
if self.config['useZip']:
url = self.config['url_epInfo_zip'] % (sid, language)
else:
url = self.config['url_epInfo'] % (sid, language)
epsEt = self._getetsrc(url, language=language)
if not epsEt:
log().debug('Series results incomplete')
raise tvdb_showincomplete("Show search returned incomplete results (cannot find complete show on TVDB)")
if 'episode' not in epsEt:
return False
episodes = epsEt["episode"]
if not isinstance(episodes, list):
episodes = [episodes]
for cur_ep in episodes:
if self.config['dvdorder']:
log().debug('Using DVD ordering.')
use_dvd = cur_ep['dvd_season'] != None and cur_ep['dvd_episodenumber'] != None
else:
use_dvd = False
if use_dvd:
seasnum, epno = cur_ep['dvd_season'], cur_ep['dvd_episodenumber']
else:
seasnum, epno = cur_ep['seasonnumber'], cur_ep['episodenumber']
if seasnum is None or epno is None:
log().warning("An episode has incomplete season/episode number (season: %r, episode: %r)" % (
seasnum, epno))
continue # Skip to next episode
# float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data
seas_no = int(float(seasnum))
ep_no = int(float(epno))
for k, v in cur_ep.items():
k = k.lower()
if v is not None:
if k == 'filename':
v = self.config['url_artworkPrefix'] % (v)
else:
v = self._cleanData(v)
self._setItem(sid, seas_no, ep_no, k, v)
return True
def _nameToSid(self, name):
"""Takes show name, returns the correct series ID (if the show has
already been grabbed), or grabs all episodes and returns
the correct SID.
"""
if name in self.corrections:
log().debug('Correcting %s to %s' % (name, self.corrections[name]))
return self.corrections[name]
else:
log().debug('Getting show %s' % (name))
selected_series = self._getSeries(name)
if isinstance(selected_series, dict):
selected_series = [selected_series]
sids = list(int(x['id']) for x in selected_series if
self._getShowData(int(x['id']), self.config['language']))
self.corrections.update(dict((x['seriesname'], int(x['id'])) for x in selected_series))
return sids
def __getitem__(self, key):
"""Handles tvdb_instance['seriesname'] calls.
The dict index should be the show id
"""
if isinstance(key, (int, long)):
# Item is integer, treat as show id
if key not in self.shows:
self._getShowData(key, self.config['language'], True)
return self.shows[key]
key = str(key).lower()
self.config['searchterm'] = key
selected_series = self._getSeries(key)
if isinstance(selected_series, dict):
selected_series = [selected_series]
[[self._setShowData(show['id'], k, v) for k, v in show.items()] for show in selected_series]
return selected_series
def __repr__(self):
return str(self.shows)
def main():
"""Simple example of using tvdb_api - it just
grabs an episode name interactively.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
tvdb_instance = Tvdb(interactive=True, cache=False)
print tvdb_instance['Lost']['seriesname']
print tvdb_instance['Lost'][1][4]['episodename']
if __name__ == '__main__':
main()
|
guangxingli/python-neo | refs/heads/master | examples/read_files.py | 7 | # -*- coding: utf-8 -*-
"""
This is an example for reading files with neo.io
"""
import urllib
import neo
# Plexon files
distantfile = 'https://portal.g-node.org/neo/plexon/File_plexon_3.plx'
localfile = './File_plexon_3.plx'
urllib.urlretrieve(distantfile, localfile)
#create a reader
reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
# read the blocks
blks = reader.read(cascade=True, lazy=False)
print blks
# access the segments
for blk in blks:
for seg in blk.segments:
print seg
for asig in seg.analogsignals:
print asig
for st in seg.spiketrains:
print st
# CED Spike2 files
distantfile = 'https://portal.g-node.org/neo/spike2/File_spike2_1.smr'
localfile = './File_spike2_1.smr'
urllib.urlretrieve(distantfile, localfile)
#create a reader
reader = neo.io.Spike2IO(filename='File_spike2_1.smr')
# read the block
bl = reader.read(cascade=True, lazy=False)[0]
print bl
# access the segments
for seg in bl.segments:
print seg
for asig in seg.analogsignals:
print asig
for st in seg.spiketrains:
print st
|
endlessm/chromium-browser | refs/heads/master | v8/tools/sanitizers/sancov_merger_test.py | 25 | # Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import sancov_merger
# Files on disk after test runner completes. The files are mapped by
# executable name -> file list.
FILE_MAP = {
'd8': [
'd8.test.1.1.sancov',
'd8.test.2.1.sancov',
'd8.test.3.1.sancov',
'd8.test.4.1.sancov',
'd8.test.5.1.sancov',
'd8.test.5.2.sancov',
'd8.test.6.1.sancov',
],
'cctest': [
'cctest.test.1.1.sancov',
'cctest.test.2.1.sancov',
'cctest.test.3.1.sancov',
'cctest.test.4.1.sancov',
],
}
# Inputs for merge process with 2 cpus. The tuples contain:
# (flag, path, executable name, intermediate result index, file list).
EXPECTED_INPUTS_2 = [
(False, '/some/path', 'cctest', 0, [
'cctest.test.1.1.sancov',
'cctest.test.2.1.sancov']),
(False, '/some/path', 'cctest', 1, [
'cctest.test.3.1.sancov',
'cctest.test.4.1.sancov']),
(False, '/some/path', 'd8', 0, [
'd8.test.1.1.sancov',
'd8.test.2.1.sancov',
'd8.test.3.1.sancov',
'd8.test.4.1.sancov']),
(False, '/some/path', 'd8', 1, [
'd8.test.5.1.sancov',
'd8.test.5.2.sancov',
'd8.test.6.1.sancov']),
]
# The same for 4 cpus.
EXPECTED_INPUTS_4 = [
(True, '/some/path', 'cctest', 0, [
'cctest.test.1.1.sancov',
'cctest.test.2.1.sancov']),
(True, '/some/path', 'cctest', 1, [
'cctest.test.3.1.sancov',
'cctest.test.4.1.sancov']),
(True, '/some/path', 'd8', 0, [
'd8.test.1.1.sancov',
'd8.test.2.1.sancov']),
(True, '/some/path', 'd8', 1, [
'd8.test.3.1.sancov',
'd8.test.4.1.sancov']),
(True, '/some/path', 'd8', 2, [
'd8.test.5.1.sancov',
'd8.test.5.2.sancov']),
(True, '/some/path', 'd8', 3, [
'd8.test.6.1.sancov'])]
class MergerTests(unittest.TestCase):
def test_generate_inputs_2_cpu(self):
inputs = sancov_merger.generate_inputs(
False, '/some/path', FILE_MAP, 2)
self.assertEquals(EXPECTED_INPUTS_2, inputs)
def test_generate_inputs_4_cpu(self):
inputs = sancov_merger.generate_inputs(
True, '/some/path', FILE_MAP, 4)
self.assertEquals(EXPECTED_INPUTS_4, inputs)
|
stevenewey/django | refs/heads/master | django/conf/locale/tr/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'd F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i'
FIRST_DAY_OF_WEEK = 1 # Pazartesi
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%y-%m-%d', # '06-10-25'
# '%d %B %Y', '%d %b. %Y', # '25 Ekim 2006', '25 Eki. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
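# Illustrative example (not part of Django): the *_INPUT_FORMATS above use
# strptime syntax, so a Turkish-style date string can be parsed directly:
#
#     >>> from datetime import datetime
#     >>> datetime.strptime('25/10/2006', DATE_INPUT_FORMATS[0]).date()
#     datetime.date(2006, 10, 25)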
|
netvvorms/raspberry | refs/heads/master | raspberry/sensors/__init__.py | 2 | __all__ = [ ]
import bmp085
from bmp085 import BMP085
__all__.extend(bmp085.__all__)
|
WikiRealtyInc/django-storages | refs/heads/master | storages/__init__.py | 21 | __version__ = '1.1.8'
|
gritlogic/incubator-airflow | refs/heads/master | tests/operators/subdag_operator.py | 14 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import unittest
import airflow
from airflow.models import DAG, DagBag
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.jobs import BackfillJob
from airflow.exceptions import AirflowException
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
default_args = dict(
owner='airflow',
start_date=DEFAULT_DATE,
)
class SubDagOperatorTests(unittest.TestCase):
def test_subdag_name(self):
"""
Subdag names must be {parent_dag}.{subdag task}
"""
dag = DAG('parent', default_args=default_args)
subdag_good = DAG('parent.test', default_args=default_args)
subdag_bad1 = DAG('parent.bad', default_args=default_args)
subdag_bad2 = DAG('bad.test', default_args=default_args)
subdag_bad3 = DAG('bad.bad', default_args=default_args)
SubDagOperator(task_id='test', dag=dag, subdag=subdag_good)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad1)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad2)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad3)
def test_subdag_pools(self):
"""
Subdags and subdag tasks can't both have a pool with 1 slot
"""
dag = DAG('parent', default_args=default_args)
subdag = DAG('parent.child', default_args=default_args)
session = airflow.settings.Session()
pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)
session.add(pool_1)
session.add(pool_10)
session.commit()
dummy_1 = DummyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')
self.assertRaises(
AirflowException,
SubDagOperator,
task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')
# recreate dag because failed subdagoperator was already added
dag = DAG('parent', default_args=default_args)
SubDagOperator(
task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')
session.delete(pool_1)
session.delete(pool_10)
session.commit()
def test_subdag_deadlock(self):
dagbag = DagBag()
dag = dagbag.get_dag('test_subdag_deadlock')
dag.clear()
subdag = dagbag.get_dag('test_subdag_deadlock.subdag')
subdag.clear()
# first make sure subdag is deadlocked
self.assertRaisesRegexp(AirflowException, 'deadlocked', subdag.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
# now make sure dag picks up the subdag error
self.assertRaises(AirflowException, dag.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
|
oasiswork/odoo | refs/heads/8.0 | addons/website/tests/__init__.py | 396 | # -*- coding: utf-8 -*-
import test_converter
import test_crawl
import test_ui
import test_views
|
markeTIC/OCB | refs/heads/8.0 | addons/account/wizard/account_state_open.py | 341 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
_name = 'account.state.open'
_description = 'Account State Open'
def change_inv_state(self, cr, uid, ids, context=None):
proxy = self.pool.get('account.invoice')
if context is None:
context = {}
active_ids = context.get('active_ids')
if isinstance(active_ids, list):
invoice = proxy.browse(cr, uid, active_ids[0], context=context)
if invoice.reconciled:
raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
invoice.signal_workflow('open_test')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
is210-2015-fall-02/is210-week-06-warmup | refs/heads/master | data.py | 19 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides an interface to data-bearing functions."""
import json
import os
BALLETS = ['The Nutcracker',
'Duck Lake',
'Sleeping Beauty',
'Onegin',
'Manon',
'Le Corsair',
'Serenade',
'Agon',
'Apollo',
'Scherehazade',
'Giselle',
'Amadeus',
'Robert Schumann\'s "Davids Bundlertanze"',
'Firebird',
'Concerto Barocco',
'Napoli',
'A Midsummer Night\'s Dream',
'La Bayadere',
'Romeo and Juliet',
'Jewels',
'The Four Temperaments',
'La Valse']
DIRECTIONS = ('North', 'South', 'East', 'Spaghetti Western')
def get_raw_data():
"""Loads our data from file and returns it.
Returns:
list: A list of serialized data.
Examples:
>>> get_raw_data()
[578, 389, ...]
"""
fpath = os.path.dirname(os.path.abspath(__file__))
fpath = os.path.join(fpath, 'data.json')
fhandler = open(fpath, 'r')
data = json.load(fhandler)
fhandler.close()
return data
def get_data_as_list():
"""Returns the stored data as a list object.
Returns:
list: A list of serialized data.
Examples:
>>> get_data_as_list()
[578, 389, ...]
"""
return get_raw_data()
def get_data_as_tuple():
"""Returns the stored data as a tuple.
Returns:
tuple: A tuple of serialized data.
Examples:
>>> get_data_as_tuple()
(578, 389, ...)
"""
return tuple(get_raw_data())
def get_nested_data():
"""Returns the stored data as a nested sequence.
Returns:
list: A list of tuples.
Examples:
>>> get_nested_data()
[(578, 389), ...]
"""
retval = []
tupval = ()
for idx, value in enumerate(get_raw_data()):
        if idx % 2:
            # odd index: complete the current pair and store it
            tupval += (value,)
            retval.append(tupval)
        else:
            # even index: start a new pair
            tupval = (value,)
return retval
|
40223114/2015cd_midterm | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_discovery.py | 785 | import os
import re
import sys
import unittest
class TestableTestProgram(unittest.TestProgram):
module = '__main__'
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test3.py', 'test4.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
loader.loadTestsFromModule = lambda module: module + ' tests'
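        # With the loader's import/load hooks stubbed out above, _find_tests
        # should yield one '<module name> module tests' string per matching
        # file, recursing into test_dir but not into another_dir.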
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
expected = [name + ' module tests' for name in
('test1', 'test2')]
expected.extend([('test_dir.%s' % name) + ' module tests' for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return 'load_tests'
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
def loadTestsFromModule(module, use_load_tests):
if use_load_tests:
raise self.failureException('use_load_tests should be False for packages')
return module.path + ' module tests'
loader.loadTestsFromModule = loadTestsFromModule
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the test_directory package by calling load_tests
# and directly from the test_directory2 package
self.assertEqual(suite,
['load_tests', 'test_directory2' + ' module tests'])
self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, 'test_directory' + ' module tests', 'test*')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
def do_discovery(argv):
args.extend(argv)
program._do_discovery = do_discovery
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [])
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, ['foo', 'bar'])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
program.module = None
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, [])
program._do_discovery = do_discovery
program.parseArgs(['something'])
self.assertTrue(self.called)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
program.module = None
args = ['something', '-v', '-b', '-v', '-c', '-f']
self.called = False
def do_discovery(argv):
self.called = True
self.assertEqual(argv, args[1:])
program._do_discovery = do_discovery
program.parseArgs(args)
self.assertTrue(self.called)
def test_command_line_handling_do_discovery_too_many_arguments(self):
class Stop(Exception):
pass
def usageExit():
raise Stop
program = TestableTestProgram()
program.usageExit = usageExit
with self.assertRaises(Stop):
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def test_detect_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
if __name__ == '__main__':
unittest.main()
|
rishikksh20/scikit-learn | refs/heads/master | benchmarks/bench_covertype.py | 57 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
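# Illustrative invocation using the CLI flags defined below, e.g. to compare
# SGD and CART with 4 parallel workers:
#
#     python bench_covertype.py --classifiers SGD CART --n-jobs 4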
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
mrquim/mrquimrepo | refs/heads/master | script.module.youtube.dl/lib/youtube_dl/extractor/buzzfeed.py | 47 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from .facebook import FacebookIE
class BuzzFeedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?buzzfeed\.com/[^?#]*?/(?P<id>[^?#]+)'
_TESTS = [{
'url': 'http://www.buzzfeed.com/abagg/this-angry-ram-destroys-a-punching-bag-like-a-boss?utm_term=4ldqpia',
'info_dict': {
'id': 'this-angry-ram-destroys-a-punching-bag-like-a-boss',
'title': 'This Angry Ram Destroys A Punching Bag Like A Boss',
'description': 'Rambro!',
},
'playlist': [{
'info_dict': {
'id': 'aVCR29aE_OQ',
'ext': 'mp4',
'title': 'Angry Ram destroys a punching bag..',
'description': 'md5:c59533190ef23fd4458a5e8c8c872345',
'upload_date': '20141024',
'uploader_id': 'Buddhanz1',
'uploader': 'Angry Ram',
}
}]
}, {
'url': 'http://www.buzzfeed.com/sheridanwatson/look-at-this-cute-dog-omg?utm_term=4ldqpia',
'params': {
'skip_download': True, # Got enough YouTube download tests
},
'info_dict': {
'id': 'look-at-this-cute-dog-omg',
'description': 're:Munchkin the Teddy Bear is back ?!',
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
},
'playlist': [{
'info_dict': {
'id': 'mVmBL8B-In0',
'ext': 'mp4',
'title': 're:Munchkin the Teddy Bear gets her exercise',
'description': 'md5:28faab95cda6e361bcff06ec12fc21d8',
'upload_date': '20141124',
'uploader_id': 'CindysMunchkin',
'uploader': 're:^Munchkin the',
},
}]
}, {
'url': 'http://www.buzzfeed.com/craigsilverman/the-most-adorable-crash-landing-ever#.eq7pX0BAmK',
'info_dict': {
'id': 'the-most-adorable-crash-landing-ever',
'title': 'Watch This Baby Goose Make The Most Adorable Crash Landing',
'description': 'This gosling knows how to stick a landing.',
},
'playlist': [{
'md5': '763ca415512f91ca62e4621086900a23',
'info_dict': {
'id': '971793786185728',
'ext': 'mp4',
'title': 'We set up crash pads so that the goslings on our roof would have a safe landi...',
'uploader': 'Calgary Outdoor Centre-University of Calgary',
},
}],
'add_ie': ['Facebook'],
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
all_buckets = re.findall(
r'(?s)<div class="video-embed[^"]*"..*?rel:bf_bucket_data=\'([^\']+)\'',
webpage)
entries = []
for bd_json in all_buckets:
bd = json.loads(bd_json)
video = bd.get('video') or bd.get('progload_video')
if not video:
continue
entries.append(self.url_result(video['url']))
facebook_urls = FacebookIE._extract_urls(webpage)
entries.extend([
self.url_result(facebook_url)
for facebook_url in facebook_urls])
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'entries': entries,
}
|
home-assistant/home-assistant | refs/heads/dev | homeassistant/components/verisure/switch.py | 2 | """Support for Verisure Smartplugs."""
from __future__ import annotations
from time import monotonic
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CONF_GIID, DOMAIN
from .coordinator import VerisureDataUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Verisure alarm control panel from a config entry."""
coordinator: VerisureDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
VerisureSmartplug(coordinator, serial_number)
for serial_number in coordinator.data["smart_plugs"]
)
class VerisureSmartplug(CoordinatorEntity, SwitchEntity):
"""Representation of a Verisure smartplug."""
coordinator: VerisureDataUpdateCoordinator
def __init__(
self, coordinator: VerisureDataUpdateCoordinator, serial_number: str
) -> None:
"""Initialize the Verisure device."""
super().__init__(coordinator)
self._attr_name = coordinator.data["smart_plugs"][serial_number]["area"]
self._attr_unique_id = serial_number
self.serial_number = serial_number
self._change_timestamp = 0
self._state = False
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this entity."""
area = self.coordinator.data["smart_plugs"][self.serial_number]["area"]
return {
"name": area,
"suggested_area": area,
"manufacturer": "Verisure",
"model": "SmartPlug",
"identifiers": {(DOMAIN, self.serial_number)},
"via_device": (DOMAIN, self.coordinator.entry.data[CONF_GIID]),
}
@property
def is_on(self) -> bool:
"""Return true if on."""
if monotonic() - self._change_timestamp < 10:
return self._state
self._state = (
self.coordinator.data["smart_plugs"][self.serial_number]["currentState"]
== "ON"
)
return self._state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (
super().available
and self.serial_number in self.coordinator.data["smart_plugs"]
)
def turn_on(self, **kwargs) -> None:
"""Set smartplug status on."""
self.coordinator.verisure.set_smartplug_state(self.serial_number, True)
self._state = True
self._change_timestamp = monotonic()
self.schedule_update_ha_state()
def turn_off(self, **kwargs) -> None:
"""Set smartplug status off."""
self.coordinator.verisure.set_smartplug_state(self.serial_number, False)
self._state = False
self._change_timestamp = monotonic()
self.schedule_update_ha_state()
|
ahmadshahwan/cohorte-platforms | refs/heads/master | build/extra/macosx/jpype/_jwrapper.py | 7 | # *****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import _jpype
from . import _jclass
from ._jpackage import JPackage
def _initialize():
_jpype.setWrapperClass(_JWrapper)
_jpype.setStringWrapperClass(JString)
class _JWrapper(object):
def __init__(self, v):
if v is not None:
self._value = _jpype.convertToJValue(self.typeName, v)
else:
self._value = None
class JByte(_JWrapper):
typeName = "byte"
class JInt(_JWrapper):
typeName = "int"
class JLong(_JWrapper):
typeName = "long"
class JFloat(_JWrapper):
typeName = "float"
class JDouble(_JWrapper):
typeName = "double"
class JChar(_JWrapper):
typeName = "char"
class JBoolean(_JWrapper):
typeName = "boolean"
class JString(_JWrapper):
typeName = "java.lang.String"
def _getDefaultTypeName(obj):
if obj is True or obj is False:
return 'java.lang.Boolean'
if isinstance(obj, str):
return "java.lang.String"
if isinstance(obj, int):
return "java.lang.Integer"
if isinstance(obj, float):
return "java.lang.Double"
if isinstance(obj, _jclass._JavaClass):
return obj.__javaclassname__
if isinstance(obj, JPackage("java").lang.Class):
return obj.__class__.__javaclass__.getName()
if isinstance(obj, _JWrapper):
return obj.typeName
raise JPackage("java").lang.RuntimeException(
"Unable to determine the default type of {0}".format(obj.__class__))
class JObject(_JWrapper):
def __init__(self, v, tp=None):
if tp is None:
tp = _getDefaultTypeName(v)
if isinstance(tp, _jclass._JavaClass):
tp = tp.__javaclass__.getName()
self.typeName = tp
self._value = _jpype.convertToJValue(tp, v)
|
charulagrl/partyrocker | refs/heads/master | application/favoritetweet.py | 2 | from twitter import *
from application.models import User, Hashtag
import os
import sys
class TwitterFav():
def __init__(self):
self.t = None
def get_outh(self, user):
OAUTH_TOKEN = user.auth_token
OAUTH_SECRET = user.auth_secret
CONSUMER_KEY = user.consumer_key
CONSUMER_SECRET = user.consumer_secret
#print OAUTH_SECRET, OAUTH_TOKEN
self.t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
if self.t is not None:
return True
else:
return False
def query(self):
userList = User.query.filter_by(job_status=True).all()
return userList
def get_hashtag(self, user_id):
tagList = Hashtag.query.filter_by(id=user_id).all()
return tagList
def favoriteTweets(self):
userList = self.query()
for user in userList:
if self.get_outh(user):
tagList = self.get_hashtag(user.id)
if len(tagList) < 19:
for tag in tagList:
self.search_and_fav(tag.tag, 10)
def search_tweets(self, q, count=100, max_id=None):
return self.t.search.tweets(q=q, result_type='recent', count=count, lang="en", max_id=max_id)
def favorites_create(self, tweet):
try:
result = self.t.favorites.create(_id=tweet['id'])
print "Favorited"
return result
except TwitterHTTPError as e:
print "Error: ", e
return None
def search_and_fav(self, q, count=100, max_id=None):
result = self.search_tweets(q, count, max_id)
first_id = result['statuses'][0]['id']
last_id = result['statuses'][-1]['id']
success = 0
for tweet in result['statuses']:
if self.favorites_create(tweet) is not None:
success += 1
print "Favorited total: %i of %i" % (success, len(result['statuses']))
print "First id %s last id %s" % (first_id, last_id)
def search_and_unfav(self, username=None):
results = self.t.favorites.list(screen_name=username, count=10, result_type='recent')
if results:
for r in results:
                self.t.favorites.destroy(_id=r['id'])
print "All the recent tweets unfavorited"
else:
print "No tweets for unfavoriting"
def unfavoriteTweets(self):
userList = self.query()
for user in userList:
if self.get_outh(user):
                self.search_and_unfav(user.twitter_handle)
def auto_rt(self, q, count=100, result_type="recent"):
"""
Retweets tweets that match a certain phrase (hashtag, word, etc.)
"""
        result = self.search_tweets(q, count)
for tweet in result["statuses"]:
try:
# don't retweet your own tweets
if tweet["user"]["screen_name"] == TWITTER_HANDLE:
continue
                result = self.t.statuses.retweet(id=tweet["id"])
print("retweeted: %s" % (result["text"].encode("utf-8")))
# when you have already retweeted a tweet, this error is thrown
except TwitterHTTPError as e:
print("error: %s" % (str(e)))
def auto_follow(self, q, count=100, result_type="recent"):
"""
Follows anyone who tweets about a specific phrase (hashtag, word, etc.)
"""
        result = self.search_tweets(q, count)
        following = set(self.t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
# make sure the "already followed" file exists
if not os.path.isfile(ALREADY_FOLLOWED_FILE):
with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
out_file.write("")
# read in the list of user IDs that the bot has already followed in the
# past
do_not_follow = set()
dnf_list = []
with open(ALREADY_FOLLOWED_FILE) as in_file:
for line in in_file:
dnf_list.append(int(line))
do_not_follow.update(set(dnf_list))
del dnf_list
for tweet in result["statuses"]:
try:
if (tweet["user"]["screen_name"] != TWITTER_HANDLE and
tweet["user"]["id"] not in following and
tweet["user"]["id"] not in do_not_follow):
                    self.t.friendships.create(user_id=tweet["user"]["id"], follow=True)
following.update(set([tweet["user"]["id"]]))
print("followed %s" % (tweet["user"]["screen_name"]))
except TwitterHTTPError as e:
print("error: %s" % (str(e)))
# quit on error unless it's because someone blocked me
if "blocked" not in str(e).lower():
quit()
def auto_follow_followers(self):
"""
Follows back everyone who's followed you
"""
        following = set(self.t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
        followers = set(self.t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
not_following_back = followers - following
for user_id in not_following_back:
try:
                self.t.friendships.create(user_id=user_id, follow=True)
except Exception as e:
print("error: %s" % (str(e)))
def auto_unfollow_nonfollowers(self):
"""
Unfollows everyone who hasn't followed you back
"""
        following = set(self.t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
        followers = set(self.t.followers.ids(screen_name=TWITTER_HANDLE)["ids"])
# put user IDs here that you want to keep following even if they don't
# follow you back
users_keep_following = set([])
not_following_back = following - followers
# make sure the "already followed" file exists
if not os.path.isfile(ALREADY_FOLLOWED_FILE):
with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
out_file.write("")
# update the "already followed" file with users who didn't follow back
already_followed = set(not_following_back)
af_list = []
with open(ALREADY_FOLLOWED_FILE) as in_file:
for line in in_file:
af_list.append(int(line))
already_followed.update(set(af_list))
del af_list
with open(ALREADY_FOLLOWED_FILE, "w") as out_file:
for val in already_followed:
out_file.write(str(val) + "\n")
for user_id in not_following_back:
if user_id not in users_keep_following:
                self.t.friendships.destroy(user_id=user_id)
print("unfollowed %d" % (user_id))
|
weidongxu84/info-gatherer | refs/heads/master | django/conf/locale/ko/formats.py | 313 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y년 n월 j일'
TIME_FORMAT = 'A g:i:s'
DATETIME_FORMAT = 'Y년 n월 j일 g:i:s A'
YEAR_MONTH_FORMAT = 'Y년 F월'
MONTH_DAY_FORMAT = 'F월 j일'
SHORT_DATE_FORMAT = 'Y-n-j.'
SHORT_DATETIME_FORMAT = 'Y-n-j H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
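# A minimal illustration of that strftime/strptime syntax (a sketch using only
# the standard library, not part of the locale settings themselves):
#   from datetime import datetime
#   datetime.strptime('2006-10-25', '%Y-%m-%d')       # -> datetime(2006, 10, 25, 0, 0)
#   datetime.strptime('14:30:59', '%H:%M:%S').time()  # -> datetime.time(14, 30, 59)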
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
'%Y년 %m월 %d일', # '2006년 10월 25일', with localized suffix.
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
'%H시 %M분 %S초', # '14시 30분 59초'
'%H시 %M분', # '14시 30분'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
'%Y년 %m월 %d일 %H시 %M분 %S초', # '2006년 10월 25일 14시 30분 59초'
'%Y년 %m월 %d일 %H시 %M분', # '2006년 10월 25일 14시 30분'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
fengzhyuan/Halide | refs/heads/master | python_bindings/tutorial/lesson_13_tuples.py | 13 | #!/usr/bin/python3
# Halide tutorial lesson 13: Tuples
# This lesson describes how to write Funcs that evaluate to multiple
# values.
# On linux, you can compile and run it like so:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_13 -std=c++11
# LD_LIBRARY_PATH=../bin ./lesson_13
# On os x:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -o lesson_13 -std=c++11
# DYLD_LIBRARY_PATH=../bin ./lesson_13
# If you have the entire Halide source tree, you can also build it by
# running:
# make tutorial_lesson_13_tuples
# in a shell with the current directory at the top of the halide
# source tree.
#include "Halide.h"
#include <stdio.h>
#include <algorithm>
#using namespace Halide
from halide import *
import numpy
import math
min_, max_ = __builtins__.min, __builtins__.max
def main():
# So far Funcs (such as the one below) have evaluated to a single
# scalar value for each point in their domain.
single_valued = Func()
x, y = Var("x"), Var("y")
single_valued[x, y] = x + y
# One way to write a Func that returns a collection of values is
# to add an additional dimension which indexes that
# collection. This is how we typically deal with color. For
# example, the Func below represents a collection of three values
# for every x, y coordinate indexed by c.
color_image = Func()
c = Var("c")
color_image[x, y, c] = select(c == 0, 245, # Red value
c == 1, 42, # Green value
132) # Blue value
# This method is often convenient because it makes it easy to
# operate on this Func in a way that treats each item in the
# collection equally:
brighter = Func()
brighter[x, y, c] = color_image[x, y, c] + 10
# However this method is also inconvenient for three reasons.
#
# 1) Funcs are defined over an infinite domain, so users of this
# Func can for example access color_image(x, y, -17), which is
# not a meaningful value and is probably indicative of a bug.
#
# 2) It requires a select, which can impact performance if not
# bounded and unrolled:
# brighter.bound(c, 0, 3).unroll(c)
#
# 3) With this method, all values in the collection must have the
# same type. While the above two issues are merely inconvenient,
# this one is a hard limitation that makes it impossible to
# express certain things in this way.
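    # A concrete sketch of the scheduling hint from point (2) above
    # (illustrative only, left commented out because scheduling is covered in
    # earlier lessons):
    #
    #     brighter.bound(c, 0, 3).unroll(c)
    #
    # bound() promises Halide that c only takes the values 0, 1 and 2, and
    # unroll() then turns the select into three straight-line computations.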
# It is also possible to represent a collection of values as a
# collection of Funcs:
func_array = [Func() for i in range(3)]
func_array[0][x, y] = x + y
func_array[1][x, y] = sin(x)
func_array[2][x, y] = cos(y)
# This method avoids the three problems above, but introduces a
# new annoyance. Because these are separate Funcs, it is
# difficult to schedule them so that they are all computed
# together inside a single loop over x, y.
# A third alternative is to define a Func as evaluating to a
# Tuple instead of an Expr. A Tuple is a fixed-size collection of
# Exprs which may have different type. The following function
# evaluates to an integer value (x+y), and a floating point value
# (sin(x*y)).
multi_valued = Func("multi_valued")
multi_valued[x, y] = Tuple(x + y, sin(x * y))
# Realizing a tuple-valued Func returns a collection of
# Buffers. We call this a Realization. It's equivalent to a
# std::vector of Buffer/Image objects:
if True:
r = multi_valued.realize(80, 60)
assert r.size() == 2
im1 = Image(Int(32), r[0])
im2 = Image(Float(32), r[1])
assert type(im1) is Image_int32
assert type(im2) is Image_float32
assert im1(30, 40) == 30 + 40
assert numpy.isclose(im2(30, 40), math.sin(30 * 40))
# All Tuple elements are evaluated together over the same domain
    # in the same loop nest, but stored in distinct allocations. The
    # equivalent plain Python code to the above is:
if True:
        multi_valued_0 = numpy.empty((80*60), dtype=numpy.int32)
        multi_valued_1 = numpy.empty((80*60), dtype=numpy.float32)
        for yy in range(60):
            for xx in range(80):
                multi_valued_0[xx + 80*yy] = xx + yy
                multi_valued_1[xx + 80*yy] = math.sin(xx*yy)
# When compiling ahead-of-time, a Tuple-valued Func evaluates
# into multiple distinct output buffer_t structs. These appear in
# order at the end of the function signature:
# int multi_valued(...input buffers and params..., buffer_t *output_1, buffer_t *output_2)
# You can construct a Tuple by passing multiple Exprs to the
# Tuple constructor as we did above. Perhaps more elegantly, you
# can also take advantage of C++11 initializer lists and just
# enclose your Exprs in braces:
multi_valued_2 = Func("multi_valued_2")
#multi_valued_2(x, y) = {x + y, sin(x*y)}
multi_valued_2[x, y] = Tuple(x + y, sin(x * y))
# Calls to a multi-valued Func cannot be treated as Exprs. The
# following is a syntax error:
# Func consumer
# consumer[x, y] = multi_valued_2[x, y] + 10
# Instead you must index a Tuple with square brackets to retrieve
# the individual Exprs:
integer_part = multi_valued_2[x, y][0]
floating_part = multi_valued_2[x, y][1]
assert type(integer_part) is Expr
assert type(floating_part) is Expr
consumer = Func()
consumer[x, y] = Tuple(integer_part + 10, floating_part + 10.0)
# Tuple reductions.
if True:
# Tuples are particularly useful in reductions, as they allow
# the reduction to maintain complex state as it walks along
# its domain. The simplest example is an argmax.
# First we create an Image to take the argmax over.
input_func = Func()
input_func[x] = sin(x)
input = Image(Float(32), input_func.realize(100))
assert type(input) is Image_float32
        # Then we define a 2-valued Tuple which tracks the maximum value
        # and its index.
arg_max = Func()
# Pure definition.
#arg_max() = Tuple(0, input(0))
# (using [None] is a convention of this python interface)
arg_max[None] = Tuple(0, input(0))
# Update definition.
r = RDom(1, 99)
old_index = arg_max[None][0]
old_max = arg_max[None][1]
new_index = select(old_max > input[r], r, old_index)
new_max = max(input[r], old_max)
arg_max[None] = Tuple(new_index, new_max)
        # The equivalent plain Python is:
arg_max_0 = 0
arg_max_1 = float(input(0))
for r in range(1, 100):
old_index = arg_max_0
old_max = arg_max_1
new_index = r if (old_max > input(r)) else old_index
new_max = max_(input(r), old_max)
# In a tuple update definition, all loads and computation
# are done before any stores, so that all Tuple elements
# are updated atomically with respect to recursive calls
# to the same Func.
arg_max_0 = new_index
arg_max_1 = new_max
# Let's verify that the Halide and C++ found the same maximum
# value and index.
if True:
r = arg_max.realize()
r0 = Image(Int(32), r[0])
r1 = Image(Float(32), r[1])
assert type(r0) is Image_int32
assert type(r1) is Image_float32
assert arg_max_0 == r0(0)
assert numpy.isclose(arg_max_1, r1(0))
# Halide provides argmax and argmin as built-in reductions
# similar to sum, product, maximum, and minimum. They return
# a Tuple consisting of the point in the reduction domain
# corresponding to that value, and the value itself. In the
# case of ties they return the first value found. We'll use
# one of these in the following section.
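        # A sketch of the built-in form (assuming the same 'input' Image and a
        # fresh reduction domain; left commented out because this section
        # already built the argmax by hand):
        #
        #     r2 = RDom(0, 100)
        #     best = Func()
        #     best[None] = argmax(input[r2])   # Tuple: (index, maximum value)
        #
        # The argmin built-in is used for real in the Mandelbrot example below.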
# Tuples for user-defined types.
if True:
# Tuples can also be a convenient way to represent compound
# objects such as complex numbers. Defining an object that
# can be converted to and from a Tuple is one way to extend
# Halide's type system with user-defined types.
class Complex:
#Expr real, imag
# Construct from a Tuple
#Complex(Tuple t) : real(t[0]), imag(t[1])
def __init__(self, r, i=None):
if type(r) is Tuple:
t = r
self.real = t[0]
self.imag = t[1]
elif type(r) is float and type(i) is float:
self.real = Expr(r)
self.imag = Expr(i)
elif i is not None:
self.real = r
self.imag = i
else:
tt = Tuple(r)
self.real = tt[0]
self.imag = tt[1]
assert type(self.real) in [Expr, FuncRefExpr]
assert type(self.imag) in [Expr, FuncRefExpr]
return
def as_tuple(self):
"Convert to a Tuple"
return Tuple(self.real, self.imag)
def __add__(self, other):
"Complex addition"
return Tuple(self.real + other.real, self.imag + other.imag)
def __mul__(self, other):
"Complex multiplication"
return Tuple(self.real * other.real - self.imag * other.imag,
self.real * other.imag + self.imag * other.real)
def magnitude(self):
"Complex magnitude"
return (self.real * self.real) + (self.imag * self.imag)
# Other complex operators would go here. The above are
# sufficient for this example.
# Let's use the Complex struct to compute a Mandelbrot set.
mandelbrot = Func()
# The initial complex value corresponding to an x, y coordinate
# in our Func.
initial = Complex(x/15.0 - 2.5, y/6.0 - 2.0)
# Pure definition.
t = Var("t")
mandelbrot[x, y, t] = Complex(0.0, 0.0).as_tuple()
# We'll use an update definition to take 12 steps.
        r = RDom(1, 12)
current = Complex(mandelbrot[x, y, r-1])
# The following line uses the complex multiplication and
# addition we defined above.
mandelbrot[x, y, r] = (Complex(current*current) + initial)
# We'll use another tuple reduction to compute the iteration
# number where the value first escapes a circle of radius 4.
# This can be expressed as an argmin of a boolean - we want
# the index of the first time the given boolean expression is
# false (we consider false to be less than true). The argmax
# would return the index of the first time the expression is
# true.
escape_condition = Complex(mandelbrot[x, y, r]).magnitude() < 16.0
first_escape = argmin(escape_condition)
assert type(first_escape) is Tuple
# We only want the index, not the value, but argmin returns
# both, so we'll index the argmin Tuple expression using
# square brackets to get the Expr representing the index.
escape = Func()
escape[x, y] = first_escape[0]
# Realize the pipeline and print the result as ascii art.
result = Image(Int(32), escape.realize(61, 25))
assert type(result) is Image_int32
code = " .:-~*={&%#@"
for yy in range(result.height()):
for xx in range(result.width()):
index = result(xx, yy)
if index < len(code):
print("%c" % code[index], end="")
else:
pass # is lesson 13 cpp version buggy ?
print("\n")
print("Success!")
return 0
if __name__ == "__main__":
main()
|
enapps/enapps-openerp-server | refs/heads/master | openerp/addons/base/res/res_users.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from functools import partial
import pytz
import netsvc
import pooler
import tools
from osv import fields,osv
from osv.orm import browse_record
from service import security
from tools.translate import _
import openerp
import openerp.exceptions
from lxml import etree
from lxml.builder import E
import base64
from base64 import decodestring
import cStringIO
import PIL
from PIL import Image
import re
try:
import cups
except ImportError:
cups = None
def get_printer_list():
if cups:
conn = cups.Connection()
printers = conn.getPrinters()
result = [(k, k) for k in printers.keys()]
return result
else:
return [('none', "Please install cups and python-cups packages")]
_logger = logging.getLogger(__name__)
class groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
def _get_full_name(self, cr, uid, ids, field, arg, context=None):
res = {}
for g in self.browse(cr, uid, ids, context):
if g.category_id:
res[g.id] = '%s / %s' % (g.category_id.name, g.name)
else:
res[g.id] = g.name
return res
_columns = {
'name': fields.char('Name', size=64, required=True, ),
'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'),
'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
'comment' : fields.text('Comment', size=250, ),
'category_id': fields.many2one('ir.module.category', 'Application', select=True),
'full_name': fields.function(_get_full_name, type='char', string='Group Name'),
}
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique !')
]
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
ids = super(groups, self).search(cr, uid, args, context=context)
gs = self.browse(cr, uid, ids, context)
gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
gs = gs[offset:offset+limit] if limit else gs[offset:]
return map(int, gs)
return super(groups, self).search(cr, uid, args, offset, limit, order, context, count)
def copy(self, cr, uid, id, default=None, context=None):
group_name = self.read(cr, uid, [id], ['name'])[0]['name']
        if default is None:
            default = {}
        default.update({'name': _('%s (copy)') % group_name})
return super(groups, self).copy(cr, uid, id, default, context)
def write(self, cr, uid, ids, vals, context=None):
if 'name' in vals:
if vals['name'].startswith('-'):
raise osv.except_osv(_('Error'),
_('The name of the group can not start with "-"'))
res = super(groups, self).write(cr, uid, ids, vals, context=context)
self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
return res
def create(self, cr, uid, vals, context=None):
if 'name' in vals:
if vals['name'].startswith('-'):
raise osv.except_osv(_('Error'),
_('The name of the group can not start with "-"'))
gid = super(groups, self).create(cr, uid, vals, context=context)
if context and context.get('noadmin', False):
pass
else:
# assign this new group to user_root
user_obj = self.pool.get('res.users')
aid = user_obj.browse(cr, 1, user_obj._get_admin_id(cr))
if aid:
aid.write({'groups_id': [(4, gid)]})
return gid
def unlink(self, cr, uid, ids, context=None):
group_users = []
for record in self.read(cr, uid, ids, ['users'], context=context):
if record['users']:
group_users.extend(record['users'])
if group_users:
user_names = [user.name for user in self.pool.get('res.users').browse(cr, uid, group_users, context=context)]
user_names = list(set(user_names))
if len(user_names) >= 5:
user_names = user_names[:5] + ['...']
raise osv.except_osv(_('Warning !'),
_('Group(s) cannot be deleted, because some user(s) still belong to them: %s !') % \
', '.join(user_names))
return super(groups, self).unlink(cr, uid, ids, context=context)
def get_extended_interface_group(self, cr, uid, context=None):
data_obj = self.pool.get('ir.model.data')
extended_group_data_id = data_obj._get_id(cr, uid, 'base', 'group_extended')
return data_obj.browse(cr, uid, extended_group_data_id, context=context).res_id
groups()
def _lang_get(self, cr, uid, context=None):
obj = self.pool.get('res.lang')
ids = obj.search(cr, uid, [('translatable','=',True)])
res = obj.read(cr, uid, ids, ['code', 'name'], context=context)
res = [(r['code'], r['name']) for r in res]
return res
def _tz_get(self,cr,uid, context=None):
return [(x, x) for x in pytz.all_timezones]
class users(osv.osv):
__admin_ids = {}
_uid_cache = {}
_name = "res.users"
_order = 'name'
WELCOME_MAIL_SUBJECT = u"Welcome to OpenERP"
WELCOME_MAIL_BODY = u"An OpenERP account has been created for you, "\
"\"%(name)s\".\n\nYour login is %(login)s, "\
"you should ask your supervisor or system administrator if you "\
"haven't been given your password yet.\n\n"\
"If you aren't %(name)s, this email reached you errorneously, "\
"please delete it."
def get_welcome_mail_subject(self, cr, uid, context=None):
""" Returns the subject of the mail new users receive (when
created via the res.config.users wizard), default implementation
is to return config_users.WELCOME_MAIL_SUBJECT
"""
return self.WELCOME_MAIL_SUBJECT
def get_welcome_mail_body(self, cr, uid, context=None):
""" Returns the subject of the mail new users receive (when
created via the res.config.users wizard), default implementation
is to return config_users.WELCOME_MAIL_BODY
"""
return self.WELCOME_MAIL_BODY
def get_current_company(self, cr, uid):
cr.execute('select company_id, res_company.name from res_users left join res_company on res_company.id = company_id where res_users.id=%s' %uid)
return cr.fetchall()
def send_welcome_email(self, cr, uid, id, context=None):
if isinstance(id,list): id = id[0]
user = self.read(cr, uid, id, ['email','login','name', 'user_email'], context=context)
email = user['email'] or user['user_email']
ir_mail_server = self.pool.get('ir.mail_server')
msg = ir_mail_server.build_email(email_from=None, # take config default
email_to=[email],
subject=self.get_welcome_mail_subject(cr, uid, context=context),
body=(self.get_welcome_mail_body(cr, uid, context=context) % user))
return ir_mail_server.send_email(cr, uid, msg, context=context)
def _set_interface_type(self, cr, uid, ids, name, value, arg, context=None):
"""Implementation of 'view' function field setter, sets the type of interface of the users.
@param name: Name of the field
@param arg: User defined argument
@param value: new value returned
@return: True/False
"""
if not value or value not in ['simple','extended']:
return False
group_obj = self.pool.get('res.groups')
extended_group_id = group_obj.get_extended_interface_group(cr, uid, context=context)
# First always remove the users from the group (avoids duplication if called twice)
self.write(cr, uid, ids, {'groups_id': [(3, extended_group_id)]}, context=context)
# Then add them back if requested
if value == 'extended':
self.write(cr, uid, ids, {'groups_id': [(4, extended_group_id)]}, context=context)
return True
def _get_interface_type(self, cr, uid, ids, name, args, context=None):
"""Implementation of 'view' function field getter, returns the type of interface of the users.
@param field_name: Name of the field
@param arg: User defined argument
@return: Dictionary of values
"""
group_obj = self.pool.get('res.groups')
extended_group_id = group_obj.get_extended_interface_group(cr, uid, context=context)
extended_users = group_obj.read(cr, uid, extended_group_id, ['users'], context=context)['users']
return dict(zip(ids, ['extended' if user in extended_users else 'simple' for user in ids]))
def _set_new_password(self, cr, uid, id, name, value, args, context=None):
if value is False:
# Do not update the password if no value is provided, ignore silently.
# For example web client submits False values for all empty fields.
return
if uid == id:
# To change their own password users must use the client-specific change password wizard,
# so that the new password is immediately used for further RPC requests, otherwise the user
# will face unexpected 'Access Denied' exceptions.
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'name': fields.char('User Name', size=64, required=True, select=True,
help="The new user's real name, used for searching"
" and most listings"),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True, help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password,
string='Set password', help="Specify a value only when creating a user or if you're changing the user's password, "
"otherwise leave empty. After a change of password, the user has to login again."),
'user_email': fields.char('Email', size=64),
'signature': fields.text('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at logon for this user, in addition to the standard menu."),
'menu_id': fields.many2one('ir.actions.actions', 'Menu Action', help="If specified, the action will replace the standard menu for this user."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help="The company this user is currently working for.", context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
'context_lang': fields.selection(_lang_get, 'Language', required=True,
help="The default language used in the graphical user interface, when translations are available. To add a new language, you can use the 'Load an Official Translation' wizard available from the 'Administration' menu."),
'context_tz': fields.selection(_tz_get, 'Timezone', size=64,
help="The user's timezone, used to output proper date and time values inside printed reports. "
"It is important to set a value for this field. You should use the same timezone "
"that is otherwise used to pick and render date and time values: your computer's timezone."),
'view': fields.function(_get_interface_type, type='selection', fnct_inv=_set_interface_type,
selection=[('simple','Simplified'),('extended','Extended')],
string='Interface', help="OpenERP offers a simplified and an extended user interface. If you use OpenERP for the first time we strongly advise you to select the simplified interface, which has less features but is easier to use. You can switch to the other interface from the User/Preferences menu at any time."),
'menu_tips': fields.boolean('Menu Tips', help="Check out this box if you want to always display tips on each menu action"),
'date': fields.datetime('Latest Connection', readonly=True),
'avatar': fields.binary('Image', help='', readonly=False),
'printer': fields.selection(get_printer_list(), 'Default Printer', help='Default Printer'),
}
def on_change_company_id(self, cr, uid, ids, company_id):
return {
'warning' : {
'title': _("Company Switch Warning"),
'message': _("Please keep in mind that documents currently displayed may not be relevant after switching to another company. If you have unsaved changes, please make sure to save and close all forms before switching to a different company. (You can click on Cancel in the User Preferences now)"),
}
}
def read(self,cr, uid, ids, fields=None, context=None, load='_classic_read'):
def override_password(o):
if 'password' in o and ( 'id' not in o or o['id'] != uid ):
o['password'] = '********'
return o
result = super(users, self).read(cr, uid, ids, fields, context, load)
canwrite = self.pool.get('ir.model.access').check(cr, uid, 'res.users', 'write', False)
if not canwrite:
if isinstance(ids, (int, float)):
result = override_password(result)
else:
result = map(override_password, result)
return result
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def _get_email_from(self, cr, uid, ids, context=None):
if not isinstance(ids, list):
ids = [ids]
res = dict.fromkeys(ids, False)
for user in self.browse(cr, uid, ids, context=context):
if user.user_email:
res[user.id] = "%s <%s>" % (user.name, user.user_email)
return res
def _get_admin_id(self, cr):
if self.__admin_ids.get(cr.dbname) is None:
ir_model_data_obj = self.pool.get('ir.model.data')
mdid = ir_model_data_obj._get_id(cr, 1, 'base', 'user_root')
self.__admin_ids[cr.dbname] = ir_model_data_obj.read(cr, 1, [mdid], ['res_id'])[0]['res_id']
return self.__admin_ids[cr.dbname]
def _get_company(self,cr, uid, context=None, uid2=False):
if not uid2:
uid2 = uid
user = self.pool.get('res.users').read(cr, uid, uid2, ['company_id'], context)
company_id = user.get('company_id', False)
return company_id and company_id[0] or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_menu(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
try:
model, res_id = dataobj.get_object_reference(cr, uid, 'base', 'action_menu_admin')
if model != 'ir.actions.act_window':
return False
return res_id
except ValueError:
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, 1, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, 1, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
            # If these groups do not exist anymore
pass
return result
_defaults = {
'password' : '',
'context_lang': 'en_GB',
'active' : True,
'menu_id': _get_menu,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
'menu_tips': False
}
# User can write to a few of her own fields (but not her groups for example)
SELF_WRITEABLE_FIELDS = ['menu_tips','view', 'password', 'signature', 'action_id', 'company_id', 'user_email', 'name']
def write(self, cr, uid, ids, values, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
if ids == [uid]:
for key in values.keys():
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
if not (values['company_id'] in self.read(cr, 1, uid, ['company_ids'], context=context)['company_ids']):
del values['company_id']
uid = 1 # safe fields only, so we write as super-user to bypass access rights
if 'avatar' in values and values.get('avatar') is not False:
logo = cStringIO.StringIO()
img = Image.open(cStringIO.StringIO(base64.decodestring(values.get('avatar'))))
img.thumbnail((64,64), Image.ANTIALIAS)
img.save(logo, format='PNG')
img = base64.encodestring(logo.getvalue())
values['avatar'] = img
res = super(users, self).write(cr, uid, ids, values, context=context)
# clear caches linked to the users
self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
clear = partial(self.pool.get('ir.rule').clear_cache, cr)
map(clear, ids)
db = cr.dbname
if db in self._uid_cache:
for id in ids:
if id in self._uid_cache[db]:
del self._uid_cache[db][id]
return res
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by OpenERP (updates, module installation, ...)'))
db = cr.dbname
if db in self._uid_cache:
for id in ids:
if id in self._uid_cache[db]:
del self._uid_cache[db][id]
return super(users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
ids = []
if name:
ids = self.search(cr, user, [('login', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name', operator, name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
user2copy = self.read(cr, uid, [id], ['login','name'])[0]
if default is None:
default = {}
copy_pattern = _("%s (copy)")
copydef = dict(login=(copy_pattern % user2copy['login']),
name=(copy_pattern % user2copy['name']),
)
copydef.update(default)
return super(users, self).copy(cr, uid, id, copydef, context)
def context_get(self, cr, uid, context=None):
user = self.browse(cr, uid, uid, context)
result = {}
for k in self._columns.keys():
if k.startswith('context_'):
res = getattr(user,k) or False
if isinstance(res, browse_record):
res = res.id
result[k[8:]] = res or False
return result
def action_get(self, cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
data_id = dataobj._get_id(cr, 1, 'base', 'action_res_users_my')
return dataobj.browse(cr, uid, data_id, context=context).res_id
def authenticate(self, db, login, password, user_agent_env):
"""Verifies and returns the user ID corresponding to the given
``login`` and ``password`` combination, or False if there was
no matching user.
:param str db: the database on which user is trying to authenticate
:param str login: username
:param str password: user password
:param dict user_agent_env: environment dictionary describing any
relevant environment attributes
"""
uid = self.login(db, login, password)
if uid == openerp.SUPERUSER_ID:
# Successfully logged in as admin!
# Attempt to guess the web base url...
if user_agent_env and user_agent_env.get('base_location'):
cr = pooler.get_db(db).cursor()
try:
self.pool.get('ir.config_parameter').set_param(cr, uid, 'web.base.url',
user_agent_env['base_location'])
cr.commit()
except Exception:
_logger.exception("Failed to update web.base.url configuration parameter")
finally:
cr.close()
return uid
def get_groups_names(self, db, uid):
if not uid:
return []
cr = pooler.get_db(db).cursor()
user = self.browse(cr, uid, uid, )
cr.execute('''
SELECT module, name
FROM ir_model_data
WHERE res_id IN %s
AND model = 'res.groups'
''', (tuple(group.id for group in user.groups_id) or tuple([]), ))
        groups = cr.fetchall()
        cr.close()
        groups = groups and ["%s.%s" % group for group in groups]
        return groups
def login(self, db, login, password):
if not password:
return False
cr = pooler.get_db(db).cursor()
try:
# autocommit: our single request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
# We effectively unconditionally write the res_users line.
cr.autocommit(True)
# Even w/ autocommit there's a chance the user row will be locked,
# in which case we can't delay the login just for the purpose of
# update the last login date - hence we use FOR UPDATE NOWAIT to
# try to get the lock - fail-fast
cr.execute("""SELECT id from res_users
WHERE login=%s AND password=%s
AND active FOR UPDATE NOWAIT""",
(tools.ustr(login), tools.ustr(password)), log_exceptions=False)
cr.execute("""UPDATE res_users
SET date = now() AT TIME ZONE 'UTC'
WHERE login=%s AND password=%s AND active
RETURNING id""",
(tools.ustr(login), tools.ustr(password)))
except Exception:
# Failing to acquire the lock on the res_users row probably means
# another request is holding it - no big deal, we skip the update
# for this time, and let the user login anyway.
cr.rollback()
cr.execute("""SELECT id from res_users
WHERE login=%s AND password=%s
AND active""",
(tools.ustr(login), tools.ustr(password)))
finally:
res = cr.fetchone()
cr.close()
if res:
return res[0]
return False
def check_super(self, passwd):
if passwd == tools.config['admin_passwd']:
return True
else:
raise openerp.exceptions.AccessDenied()
def check(self, db, uid, passwd):
"""Verifies that the given (uid, password) pair is authorized for the database ``db`` and
raise an exception if it is not."""
if not passwd:
# empty passwords disallowed for obvious security reasons
raise openerp.exceptions.AccessDenied()
if self._uid_cache.get(db, {}).get(uid) == passwd:
return
cr = pooler.get_db(db).cursor()
try:
cr.execute('SELECT COUNT(1) FROM res_users WHERE id=%s AND password=%s AND active=%s',
(int(uid), passwd, True))
res = cr.fetchone()[0]
if not res:
raise openerp.exceptions.AccessDenied()
if self._uid_cache.has_key(db):
ulist = self._uid_cache[db]
ulist[uid] = passwd
else:
self._uid_cache[db] = {uid:passwd}
finally:
cr.close()
def access(self, db, uid, passwd, sec_level, ids):
if not passwd:
return False
cr = pooler.get_db(db).cursor()
try:
cr.execute('SELECT id FROM res_users WHERE id=%s AND password=%s', (uid, passwd))
res = cr.fetchone()
if not res:
raise openerp.exceptions.AccessDenied()
return res[0]
finally:
cr.close()
def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
"""Change current user password. Old password must be provided explicitly
to prevent hijacking an existing user session, or for cases where the cleartext
password is not used to authenticate requests.
:return: True
:raise: openerp.exceptions.AccessDenied when old password is wrong
:raise: except_osv when new password is not set or empty
"""
self.check(cr.dbname, uid, old_passwd)
if new_passwd:
return self.write(cr, uid, uid, {'password': new_passwd})
raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if not args:
args = []
args = self.replace_groups_search(cr, uid, args, context=context)
return super(users, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def replace_groups_search(self, cr, uid, args, context={}):
        '''Rewrite domain clauses that use the reified "in_group_<id>" field
        names into plain ('id', 'in', [...]) clauses.'''
args_type = type(args)
if args and args_type in (list, tuple, ):
args_new = list(args) if args_type == tuple else args
for numb, args_item in enumerate(args_new):
if args_item and isinstance(args_item, (list, tuple)):
args_new[numb] = self.replace_groups_search(cr, uid, args_item, context=context)
if len(args) == 3 and isinstance(args[0], (str, unicode)) and args[1] in ('is','=','is not','!='):
if re.match(r'^in_group_[0-9]*$',args[0]):
group_id = int(args[0].split('_')[2])
operator = args[1]
operator = '=' if args[1] == 'is' else operator
operator = '!=' if args[1] == 'is not' else operator
cr.execute('''SELECT uid
FROM res_groups_users_rel
WHERE gid %s %s'''% (operator, group_id, ))
users_ids = [item[0] for item in cr.fetchall()]
users_ids = users_ids or [0]
args_new=['id','in',users_ids]
args = tuple(args_new) if args_type == tuple else args_new
return args
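    # Illustration of the rewrite above (a sketch with hypothetical ids, not
    # executed): a domain clause such as
    #     ('in_group_7', '=', True)
    # becomes
    #     ['id', 'in', [<ids of users belonging to group 7>]]
    # before the regular search is performed.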
users()
#
# Extension of res.groups and res.users with a relation for "implied" or
# "inherited" groups. Once a user belongs to a group, it automatically belongs
# to the implied groups (transitively).
#
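# For example (hypothetical groups): if "Manager" implies "User" and "User"
# implies "Employee", adding someone to "Manager" automatically also puts them
# in "User" and "Employee" through the implied_ids relation defined below.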
class cset(object):
""" A cset (constrained set) is a set of elements that may be constrained to
be a subset of other csets. Elements added to a cset are automatically
added to its supersets. Cycles in the subset constraints are supported.
"""
def __init__(self, xs):
self.supersets = set()
self.elements = set(xs)
def subsetof(self, other):
if other is not self:
self.supersets.add(other)
other.update(self.elements)
def update(self, xs):
xs = set(xs) - self.elements
if xs: # xs will eventually be empty in case of a cycle
self.elements.update(xs)
for s in self.supersets:
s.update(xs)
def __iter__(self):
return iter(self.elements)
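# A small usage sketch for cset (hypothetical values, not executed here):
#
#     a = cset([1, 2])
#     b = cset([3])
#     b.subsetof(a)  # a immediately receives b's elements: a = {1, 2, 3}
#     b.update([4])  # additions to b propagate upward: a = {1, 2, 3, 4}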
def concat(ls):
""" return the concatenation of a list of iterables """
res = []
for l in ls: res.extend(l)
return res
class groups_implied(osv.osv):
_inherit = 'res.groups'
def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
"computes the transitive closure of relation implied_ids"
memo = {} # use a memo for performance and cycle avoidance
def computed_set(g):
if g not in memo:
memo[g] = cset(g.implied_ids)
for h in g.implied_ids:
computed_set(h).subsetof(memo[g])
return memo[g]
res = {}
for g in self.browse(cr, 1, ids, context):
res[g.id] = map(int, computed_set(g))
return res
_columns = {
'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
string='Inherits', help='Users of this group automatically inherit those groups'),
'trans_implied_ids': fields.function(_get_trans_implied,
type='many2many', relation='res.groups', string='Transitively inherits'),
}
def create(self, cr, uid, values, context=None):
users = values.pop('users', None)
gid = super(groups_implied, self).create(cr, uid, values, context)
if users:
# delegate addition of users to add implied groups
self.write(cr, uid, [gid], {'users': users}, context)
return gid
def write(self, cr, uid, ids, values, context=None):
res = super(groups_implied, self).write(cr, uid, ids, values, context)
if values.get('users') or values.get('implied_ids'):
# add all implied groups (to all users of each group)
for g in self.browse(cr, uid, ids):
gids = map(int, g.trans_implied_ids)
vals = {'users': [(4, u.id) for u in g.users]}
super(groups_implied, self).write(cr, uid, gids, vals, context)
return res
groups_implied()
class users_implied(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
groups = values.pop('groups_id', None)
user_id = super(users_implied, self).create(cr, uid, values, context)
if groups:
# delegate addition of groups to add implied groups
self.write(cr, uid, [user_id], {'groups_id': groups}, context)
return user_id
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids,list):
ids = [ids]
res = super(users_implied, self).write(cr, uid, ids, values, context)
if values.get('groups_id'):
# add implied groups for all users
for user in self.browse(cr, uid, ids):
gs = set(concat([g.trans_implied_ids for g in user.groups_id]))
vals = {'groups_id': [(4, g.id) for g in gs]}
super(users_implied, self).write(cr, uid, [user.id], vals, context)
return res
users_implied()
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - boolean field 'in_groups_ID1_..._IDk' is True iff
# any of ID1, ..., IDk is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
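# For example (hypothetical ids): group 42 is reified as the boolean field
# 'in_group_42'; groups 3 and 7 presented as a selection become the field
# 'sel_groups_3_7', whose value is the right-most of those ids that actually
# appears in 'groups_id' (or False when none of them does).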
def name_boolean_group(id): return 'in_group_' + str(id)
def name_boolean_groups(ids): return 'in_groups_' + '_'.join(map(str, ids))
def name_selection_groups(ids): return 'sel_groups_' + '_'.join(map(str, ids))
def is_boolean_group(name): return name.startswith('in_group_')
def is_boolean_groups(name): return name.startswith('in_groups_')
def is_selection_groups(name): return name.startswith('sel_groups_')
def is_reified_group(name):
return is_boolean_group(name) or is_boolean_groups(name) or is_selection_groups(name)
def get_boolean_group(name): return int(name[9:])
def get_boolean_groups(name): return map(int, name[10:].split('_'))
def get_selection_groups(name): return map(int, name[11:].split('_'))
def partition(f, xs):
"return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
yes, nos = [], []
for x in xs:
(yes if f(x) else nos).append(x)
return yes, nos
class groups_view(osv.osv):
_inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
return res
def update_user_groups_view(self, cr, uid, context=None):
# the view with id 'base.user_groups_view' inherits the user form view,
# and introduces the reified group fields
view = self.get_user_groups_view(cr, uid, context)
if view:
xml1, xml2 = [], []
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append(E.field(name=field_name))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append(E.separator(string=app_name, colspan="4"))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append(E.field(name=field_name))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_user_groups_view(self, cr, uid, context=None):
try:
view = self.pool.get('ir.model.data').get_object(cr, 1, 'base', 'user_groups_view', context)
assert view and view._table_name == 'ir.ui.view'
except Exception:
view = False
return view
def get_application_groups(self, cr, uid, domain=None, context=None):
return self.search(cr, uid, domain or [])
def get_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category), as a list of pairs:
[(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean' or 'selection'.
Applications are given in sequence order. If kind is 'selection', the groups are
given in reverse implication order.
"""
def linearized(gs):
gs = set(gs)
# determine sequence order: a group should appear after its implied groups
order = dict.fromkeys(gs, 0)
for g in gs:
for h in gs.intersection(g.trans_implied_ids):
order[h] -= 1
# check whether order is total, i.e., sequence orders are distinct
if len(set(order.itervalues())) == len(gs):
return sorted(gs, key=lambda g: order[g])
return None
# classify all groups by application
gids = self.get_application_groups(cr, uid, context=context)
by_app, others = {}, []
for g in self.browse(cr, uid, gids, context):
if g.category_id:
by_app.setdefault(g.category_id, []).append(g)
else:
others.append(g)
# build the result
res = []
apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
for app in apps:
gs = linearized(by_app[app])
if gs:
res.append((app, 'selection', gs))
else:
res.append((app, 'boolean', by_app[app]))
if others:
res.append((False, 'boolean', others))
return res
groups_view()
class users_view(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
self._set_reified_groups(values)
return super(users_view, self).create(cr, uid, values, context)
def write(self, cr, uid, ids, values, context=None):
self._set_reified_groups(values)
return super(users_view, self).write(cr, uid, ids, values, context)
def _set_reified_groups(self, values):
""" reflect reified group fields in values['groups_id'] """
if 'groups_id' in values:
# groups are already given, ignore group fields
for f in filter(is_reified_group, values.iterkeys()):
del values[f]
return
add, remove = [], []
for f in values.keys():
if is_boolean_group(f):
target = add if values.pop(f) else remove
target.append(get_boolean_group(f))
elif is_boolean_groups(f):
if not values.pop(f):
remove.extend(get_boolean_groups(f))
elif is_selection_groups(f):
remove.extend(get_selection_groups(f))
selected = values.pop(f)
if selected:
add.append(selected)
# update values *only* if groups are being modified, otherwise
# we introduce spurious changes that might break the super.write() call.
if add or remove:
# remove groups in 'remove' and add groups in 'add'
values['groups_id'] = [(3, id) for id in remove] + [(4, id) for id in add]
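    # Illustrative effect of _set_reified_groups() (the reified field names
    # are schematic and the ordering of the command tuples may differ):
    #   {'sel_groups_10_11': 11, 'in_group_42': False}
    #     becomes {'groups_id': [(3, 10), (3, 11), (3, 42), (4, 11)]}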
def default_get(self, cr, uid, fields, context=None):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(users_view, self).default_get(cr, uid, fields1, context)
self._get_reified_groups(group_fields, values)
return values
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if not fields:
fields = self.fields_get(cr, uid, context=context).keys()
group_fields, fields = partition(is_reified_group, fields)
if not 'groups_id' in fields:
fields.append('groups_id')
res = super(users_view, self).read(cr, uid, ids, fields, context=context, load=load)
for values in (res if isinstance(res, list) else [res]):
self._get_reified_groups(group_fields, values)
return res
def _get_reified_groups(self, fields, values):
""" compute the given reified group fields from values['groups_id'] """
gids = set(values.get('groups_id') or [])
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_boolean_groups(f):
values[f] = not gids.isdisjoint(get_boolean_groups(f))
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
values[f] = selected and selected[-1] or False
def fields_get(self, cr, uid, allfields=None, context=None, write_access=True):
res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access)
# add reified groups fields
for app, kind, gs in self.pool.get('res.groups').get_groups_by_application(cr, uid, context):
if kind == 'selection':
# selection group field
tips = ['%s: %s' % (g.name, g.comment or '') for g in gs]
res[name_selection_groups(map(int, gs))] = {
'type': 'selection',
'string': app and app.name or _('Other'),
'selection': [(False, '')] + [(g.id, g.name) for g in gs],
'help': '\n'.join(tips),
}
else:
# boolean group fields
for g in gs:
res[name_boolean_group(g.id)] = {
'type': 'boolean',
'string': g.name,
'help': g.comment,
}
return res
users_view()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
NINAnor/QGIS | refs/heads/master | python/plugins/db_manager/db_plugins/postgis/info_model.py | 3 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import QApplication
from ..info_model import TableInfo, VectorTableInfo, RasterTableInfo
from ..html_elems import HtmlSection, HtmlParagraph, HtmlTable, HtmlTableHeader, HtmlTableCol
class PGTableInfo(TableInfo):
def __init__(self, table):
self.table = table
def generalInfo(self):
ret = []
        # if the estimation is less than 100 rows, try to count them - it shouldn't take a long time
if self.table.rowCount is None and self.table.estimatedRowCount < 100:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table._relationType == 'v' else
QApplication.translate("DBManagerPlugin", "Materialized view") if self.table._relationType == 'm' else
QApplication.translate("DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Owner:"), self.table.owner)
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
tbl.extend([
(QApplication.translate("DBManagerPlugin", "Pages:"), self.table.pages),
(QApplication.translate("DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount)
])
# privileges
# has the user access to this schema?
schema_priv = self.table.database().connector.getSchemaPrivileges(
self.table.schemaName()) if self.table.schema() else None
if schema_priv is None:
pass
elif not schema_priv[1]: # no usage privileges on the schema
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"),
QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have usage privileges for this schema!")))
else:
table_priv = self.table.database().connector.getTablePrivileges((self.table.schemaName(), self.table.name))
privileges = []
if table_priv[0]:
privileges.append("select")
                if self.table.rowCount is None or self.table.rowCount >= 0:
tbl.append((QApplication.translate("DBManagerPlugin", "Rows (counted):"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
"DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)')))
if table_priv[1]:
privileges.append("insert")
if table_priv[2]:
privileges.append("update")
if table_priv[3]:
privileges.append("delete")
priv_string = u", ".join(privileges) if len(privileges) > 0 else QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges!')
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"), priv_string))
ret.append(HtmlTable(tbl))
if schema_priv is not None and schema_priv[1]:
if table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> This user has read-only privileges.")))
if not self.table.isView:
if self.table.rowCount is not None:
if abs(self.table.estimatedRowCount - self.table.rowCount) > 1 and \
(self.table.estimatedRowCount > 2 * self.table.rowCount or
self.table.rowCount > 2 * self.table.estimatedRowCount):
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> There's a significant difference between estimated and real row count. "
'Consider running <a href="action:vacuumanalyze/run">VACUUM ANALYZE</a>.')))
# primary key defined?
if not self.table.isView:
if len(filter(lambda fld: fld.primaryKey, self.table.fields())) <= 0:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> No primary key defined for this table!")))
return ret
def getSpatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
(QApplication.translate("DBManagerPlugin", "Scripts:"), info[3]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if info[1] is not None and info[1] != info[2]:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> Version of installed scripts doesn't match version of released scripts!\n"
"This is probably a result of incorrect PostGIS upgrade.")))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
elif not self.db.connector.has_geometry_columns_access:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have privileges to read contents of geometry_columns table!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"),
QApplication.translate("DBManagerPlugin", "Default"))
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
char_max_len = fld.charMaxLen if fld.charMaxLen is not None and fld.charMaxLen != -1 else ""
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.name, attrs)
tbl.append((fld.num, name, fld.type2String(), char_max_len, is_null_txt, fld.default2String()))
return HtmlTable(tbl, {"class": "header"})
def triggersDetails(self):
if self.table.triggers() is None or len(self.table.triggers()) <= 0:
return None
ret = []
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"),
QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Enabled"))
tbl.append(HtmlTableHeader(header))
# add table contents
for trig in self.table.triggers():
name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
"action": "delete"}
(enabled, action) = (QApplication.translate("DBManagerPlugin", "Yes"), "disable") if trig.enabled else (
QApplication.translate("DBManagerPlugin", "No"), "enable")
txt_enabled = u'%(enabled)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {
"name": trig.name, "action": action, "enabled": enabled}
tbl.append((name, trig.function, trig.type2String(), txt_enabled))
ret.append(HtmlTable(tbl, {"class": "header"}))
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
'<a href="action:triggers/enable">Enable all triggers</a> / <a href="action:triggers/disable">Disable all triggers</a>')))
return ret
def rulesDetails(self):
if self.table.rules() is None or len(self.table.rules()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Definition"))
tbl.append(HtmlTableHeader(header))
# add table contents
for rule in self.table.rules():
name = u'%(name)s (<a href="action:rule/%(name)s/%(action)s">%(action)s</a>)' % {"name": rule.name,
"action": "delete"}
tbl.append((name, rule.definition))
return HtmlTable(tbl, {"class": "header"})
def getTableInfo(self):
ret = TableInfo.getTableInfo(self)
# rules
rules_details = self.rulesDetails()
if rules_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Rules'), rules_details))
return ret
class PGVectorTableInfo(PGTableInfo, VectorTableInfo):
def __init__(self, table):
VectorTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return VectorTableInfo.spatialInfo(self)
class PGRasterTableInfo(PGTableInfo, RasterTableInfo):
def __init__(self, table):
RasterTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return RasterTableInfo.spatialInfo(self)
|
ouiliame/ps.py | refs/heads/master | mechanize/_util.py | 123 | """Utility functions and date/time routines.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re
import time
import warnings
class ExperimentalWarning(UserWarning):
pass
def experimental(message):
warnings.warn(message, ExperimentalWarning, stacklevel=3)
def hide_experimental_warnings():
warnings.filterwarnings("ignore", category=ExperimentalWarning)
def reset_experimental_warnings():
warnings.filterwarnings("default", category=ExperimentalWarning)
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def hide_deprecations():
warnings.filterwarnings("ignore", category=DeprecationWarning)
def reset_deprecations():
warnings.filterwarnings("default", category=DeprecationWarning)
def write_file(filename, data):
f = open(filename, "wb")
try:
f.write(data)
finally:
f.close()
def get1(sequence):
assert len(sequence) == 1
return sequence[0]
def isstringlike(x):
try: x+""
except: return False
else: return True
## def caller():
## try:
## raise SyntaxError
## except:
## import sys
## return sys.exc_traceback.tb_frame.f_back.f_back.f_code.co_name
from calendar import timegm
# Date/time conversion routines for formats used by the HTTP protocol.
EPOCH = 1970
def my_timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
months_lower = []
for month in months: months_lower.append(month.lower())
def time2isoz(t=None):
    """Return a string representing the time t, given in seconds since the epoch.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
    """Return a string representing the time t, given in seconds since the epoch.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
days[wday], mday, months[mon-1], year, hour, min, sec)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
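# Illustrative examples (offsets in seconds):
#   offset_from_tz_string("GMT")    -> 0
#   offset_from_tz_string("-0800")  -> -28800
#   offset_from_tz_string("EST")    -> None  (only UTC-equivalent names are known)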
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = months_lower.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
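        # e.g. a 2-digit year of 94 seen when the current year is 2024 gives
        # yr = 2094 and m = -70; abs(m) > 50 with m < 0, so yr becomes 1994.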
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = my_timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
wkday_re = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
loose_http_re = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
    None is returned if the format of the string is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = strict_re.search(text)
if m:
g = m.groups()
mon = months_lower.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return my_timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = wkday_re.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = loose_http_re.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
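# Illustrative calls (the epoch values assume a correct UTC conversion and are
# not verified against this particular build):
#   http2time("Wed, 09 Feb 1994 22:23:32 GMT")  -> 760832612
#   http2time("09-Feb-94 22:23:32 GMT")         -> 760832612
#   http2time("not a date")                     -> None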
iso_re = re.compile(
    r"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = iso_re.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
|
burzillibus/RobHome | refs/heads/master | venv/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py | 1107 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases then we can short circuit
        # logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
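    # For example, "~=2.2" is evaluated as ">=2.2" plus "==2.*", and "~=1.4.5"
    # as ">=1.4.5" plus "==1.4.*" (the last release segment is dropped to form
    # the prefix).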
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself is a
        # pre-release version, we do not accept pre-release versions for the
        # version mentioned in the specifier (e.g. <3.1 should not match
        # 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself is a
        # post-release version, we do not accept post-release versions for the
        # version mentioned in the specifier (e.g. >3.1 should not match
        # 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
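# Illustrative behavior:
#   _version_split("1.0.post1") -> ["1", "0", "post1"]
#   _version_split("2.0rc1")    -> ["2", "0", "rc1"]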
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
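# Illustrative behavior (only the release digits are padded):
#   _pad_version(["1", "0"], ["1", "0", "0"])
#       -> (["1", "0", "0"], ["1", "0", "0"])
#   _pad_version(["1", "2"], ["1", "2", "dev0"])
#       -> (["1", "2"], ["1", "2", "dev0"])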
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
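# Minimal usage sketch (not part of the vendored module; assumes the classes
# above behave as documented):
if __name__ == "__main__":
    # A SpecifierSet combines its individual specifiers with a logical AND.
    spec_set = SpecifierSet(">=1.0,<2.0")
    print("1.4" in spec_set)   # expected: True
    print("2.1" in spec_set)   # expected: False
    # filter() keeps matching versions and skips pre-releases by default.
    print(list(spec_set.filter(["0.9", "1.0", "1.5rc1", "1.9"])))  # expected: ['1.0', '1.9']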
|
nolanliou/tensorflow | refs/heads/master | tensorflow/contrib/timeseries/python/timeseries/state_management.py | 67 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for wrapping a model to operate on different data shapes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.model import ModelOutputs
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
class PassthroughStateManager(object):
"""A minimal wrapper for models which do not need state management."""
def __init__(self):
self._input_statistics = None
self._graph_initialized = False
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
del model # unused
self._graph_initialized = True
self._input_statistics = input_statistics
def define_loss(self, model, features, mode):
"""Wrap "model" with StateManager-specific operations.
Args:
model: The model (inheriting from TimeSeriesModel) to manage state for.
features: A dictionary with the following key/value pairs:
feature_keys.TrainEvalFeatures.TIMES: A [batch size x window size]
Tensor with times for each observation.
feature_keys.TrainEvalFeatures.VALUES: A [batch size x window size x num
features] Tensor with values for each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN or EVAL).
Returns:
A ModelOutputs object.
Raises:
ValueError: If start state was specified.
"""
if feature_keys.State.STATE_TUPLE in features:
raise ValueError(
"Overriding start state is not supported for this model.")
return model.define_loss(features, mode)
class _OverridableStateManager(PassthroughStateManager):
"""Base class for state managers which support overriding model state."""
@abc.abstractmethod
def _define_loss_with_saved_state(self, model, features, mode):
pass
def define_loss(self, model, features, mode):
"""Switches between explicit start state and managed state."""
if feature_keys.FilteringFeatures.STATE_TUPLE in features:
# Explicit start state has been provided, so we should use that.
if mode == estimator_lib.ModeKeys.TRAIN:
raise ValueError(
"Overriding saved state for training is not supported (but a value "
"for feature {} was specified).".format(
feature_keys.FilteringFeatures.STATE_TUPLE))
start_state = features[feature_keys.FilteringFeatures.STATE_TUPLE]
del features[feature_keys.FilteringFeatures.STATE_TUPLE]
return model.get_batch_loss(
features=features, mode=mode, state=start_state)
else:
# No explicit start state; use managed state.
return self._define_loss_with_saved_state(
model=model, features=features, mode=mode)
class FilteringOnlyStateManager(_OverridableStateManager):
"""State manager for models which use state only for filtering.
Window-based models (ARModel) do not require state to be fed during training
(instead requiring a specific window size). Rather than requiring a minimum
window size for filtering, these models maintain this window in their state,
and so need state to be fed.
"""
def _define_loss_with_saved_state(self, model, features, mode):
return model.define_loss(features, mode)
class ChainingStateManager(_OverridableStateManager):
"""Maintains state across a batch for SequentialTimeSeriesModel subclasses.
The batch dimension is treated as indexing sequential chunks of the same
timeseries. End state from each chunk is fed as start state to the next chunk
during the next timestep. This is an approximation to full-batch training for
sequential models, but is typically much faster while still accurately
recovering parameters. The speedup comes from reduced scheduling overhead of
TensorFlow ops, since each operation can do much more work.
"""
def __init__(self, state_saving_interval=20, checkpoint_state=False):
"""Initialize the state manager.
Args:
state_saving_interval: This state manager saves intermediate model state
        every `state_saving_interval` times. Larger values save memory (and
        reduce checkpoint size if `checkpoint_state` is enabled), but models
will need to impute across artificial gaps of up to this size
(i.e. gaps not appearing in the original data). This imputation may
affect training. Set state_saving_interval to 1 to avoid any
artificial imputation.
checkpoint_state: If True, saved intermediate model state will be
written to checkpoints. Checkpoints will then scale with dataset
size. If False, state will be freshly imputed from the beginning of a
series each time the model is restored, which means it may take a few
iterations for state to warm up.
"""
super(ChainingStateManager, self).__init__()
self._checkpoint_state = checkpoint_state
self._state_saving_interval = state_saving_interval
self._start_state = None
self._cached_states = None
def initialize_graph(self, model, input_statistics=None):
"""Adds required operations to the graph."""
super(ChainingStateManager, self).initialize_graph(
model=model, input_statistics=input_statistics)
self._start_state = model.get_start_state()
self._cached_states = math_utils.TupleOfTensorsLookup(
key_dtype=dtypes.int64,
default_values=self._start_state,
empty_key=-1,
name="cached_states",
checkpoint=self._checkpoint_state)
def _define_loss_with_saved_state(self, model, features, mode):
"""Feeds end state from one training iteration into the next.
Args:
model: The model to wrap. Compatible with children of TimeSeriesModel.
features: Dictionary with Tensor values defining the data to be
processed. The expected key/value pairs are at minimum:
feature_keys.TrainEvalFeatures.TIMES: A [number of chunks x window
size] Tensor with times for each observation, the result of chunking
a single longer time series.
feature_keys.TrainEvalFeatures.VALUES: A [number of chunks x window
size x num features] Tensor with values for each observation,
corresponding to times.
mode: The tf.estimator.ModeKeys mode to use. For EVAL and INFER, no
batching is performed, which may be slow. This is to avoid giving
cached and almost certainly stale values.
Returns:
A ModelOutputs object.
Raises:
ValueError: If initialize_graph has not been called.
"""
if not self._graph_initialized:
raise ValueError("ChainingStateManager requires initialize_graph() to be "
"called before use.")
(loss_op, end_state, batch_predictions) = self._update_cached_states(
model=model,
features=features,
mode=mode)
# Add a batch dimension so state can be used directly (e.g. for predictions)
# without the user manually reshaping it.
last_end_state_flat = [end_state_value[-1][None]
for end_state_value in nest.flatten(end_state)]
batch_predictions["observed"] = features[
feature_keys.TrainEvalFeatures.VALUES]
return ModelOutputs(
loss=loss_op,
end_state=nest.pack_sequence_as(end_state, last_end_state_flat),
predictions=batch_predictions,
prediction_times=features[feature_keys.TrainEvalFeatures.TIMES])
def _get_chunk_number(self, time):
return time // self._state_saving_interval
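  # For example, with the default state_saving_interval of 20, times 0..19 map
  # to chunk 0, times 20..39 to chunk 1, and so on.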
def _get_cached_states(self, times):
"""Retrieve cached states for a batch of times."""
read_chunk_numbers = self._get_chunk_number(times)
looked_up_state = list(self._cached_states.lookup(
math_ops.cast(read_chunk_numbers, dtypes.int64)))
looked_up_state = tuple(looked_up_state)
# We need to special-case the first chunk in a series to explicitly rely on
# the model's starting state so that gradients flow back to it. Otherwise it
# would affect only initialization, and would not be read from or updated
# during training. Not doing this also isolates that part of the graph,
# leading to errors on model reload if there are trainable variables
# affecting a model's start state.
if self._input_statistics is not None:
start_time = self._input_statistics.start_time
else:
start_time = 0
set_to_start_state = math_ops.equal(read_chunk_numbers,
self._get_chunk_number(start_time))
new_states = []
for start_state_value, cache_variable in zip(
nest.flatten(
math_utils.replicate_state(self._start_state,
array_ops.shape(times)[0])),
nest.flatten(looked_up_state)):
new_states.append(
array_ops.where(set_to_start_state, start_state_value,
cache_variable))
looked_up_state = nest.pack_sequence_as(looked_up_state, new_states)
return looked_up_state
def _update_cached_states(self, model, features, mode):
"""Read, process, and write chunks to the cache."""
times = features[feature_keys.TrainEvalFeatures.TIMES]
looked_up_state = self._get_cached_states(times[:, 0])
(model_loss, intermediate_states,
batch_predictions) = model.per_step_batch_loss(
features=features,
mode=mode,
state=looked_up_state)
# We need to at least write to the bucket after the one we read from.
min_chunk_numbers = self._get_chunk_number(times) + 1
# We write to the bucket that would have been read had the window started at
# the next sample (except for the last sample in the window, which gets
# written to the next bucket). This assumes fixed missing times (i.e. if we
# were presented with times [10, 50] we will never see times [30, 50]).
#
# TODO(allenl): Retrieve the highest time less than the current time rather
# than relying on fixed bucketing.
write_chunk_numbers = math_ops.maximum(
self._get_chunk_number(array_ops.concat(
[times[:, 1:], times[:, -1:] + 1], axis=1)),
min_chunk_numbers)
# Write once for every computed state; this may mean that we write multiple
# times to the same cell, but later writes will take precedence.
save_ops = [
self._cached_states.insert(
keys=write_chunk_numbers,
values=intermediate_states)]
end_state = nest.pack_sequence_as(
intermediate_states,
[state_element[:, -1]
for state_element in nest.flatten(intermediate_states)])
with ops.control_dependencies(save_ops):
# Make sure end states get saved at each iteration
loss_op = array_ops.identity(model_loss)
return loss_op, end_state, batch_predictions
|
AViisiion/namebench | refs/heads/master | libnamebench/charts_test.py | 175 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for all functions related to chart generation."""
__author__ = '[email protected] (Thomas Stromberg)'
import unittest
import nameserver
import charts
def _ExampleRunsData():
odns = nameserver.NameServer('208.67.220.220', name='OpenDNS')
udns = nameserver.NameServer('156.154.70.1', name='UltraDNS')
data = [(odns, [79.0, 191.0, 84.0, 878.0, 82.0, 85.0, 882.0, 187.0, 79.0,
80.0, 79.0, 261.0, 79.0, 83.0, 82.0, 420.0, 822.0, 1890.0,
78.0, 79.0, 86.0, 89.0, 125.0, 94.0, 81.0, 79.0, 81.0, 79.0,
1105.0, 84.0]),
(udns, [9.0, 8.0, 13.0, 329.0, 9.0, 9.0, 773.0, 52.0, 9.0, 8.0, 8.0,
143.0, 27.0, 104.0, 8.0, 8.0, 320.0, 594.0, 8.0, 312.0, 11.0,
9.0, 174.0, 83.0, 8.0, 9.0, 8.0, 8.0, 496.0, 533.0])]
return data
# TODO(tstromberg): Clean up long lines, cleanse IP/hostnames.
class ChartFunctionsTest(unittest.TestCase):
def testDarkenHexColorCode(self):
self.assertEquals(charts.DarkenHexColorCode('ffffff', 0), 'ffffff')
self.assertEquals(charts.DarkenHexColorCode('2c2c2c', 1), '0c0c0c')
self.assertEquals(charts.DarkenHexColorCode('ff0000', 1), 'df0000')
self.assertEquals(charts.DarkenHexColorCode('ff00ff', 2), 'bf00bf')
def testGoodTicks(self):
self.assertEquals(charts._GoodTicks(50), 5)
self.assertEquals(charts._GoodTicks(9.8, tick_size=0.5, num_ticks=7), 2.0)
class BasicChartTests(unittest.TestCase):
def testPerRunDurationBarGraph(self):
sorted_averages = [
('10.0.0.1', [5.871, 2.6599]),
('192.168.1.2', [15.0867, 15.2531]),
('172.168.1.2', [70.7752, 15.02163]),
]
results = charts.PerRunDurationBarGraph(sorted_averages)
self.assertTrue('e%3AFBM48Y%2CCRNBM' in results)
expected = (
'http://chart.apis.google.com/chart?chxt=y%2Cx%2Cx&chd=e%3AFBM48Y%2'
'CCRNBM0&chxp=2%2C31&chxr=1%2C0%2C75%7C2%2C-3.75%2C78.75&chxtc=1%2C-720'
'&chco=4684ee%2C00248e&chbh=a&chs=720x130&cht=bhg&chxl=0%3A%7C'
'172.168.1.2%7C192.168.1.2%7C10.0.0.1%7C1%3A%7C0%7C5%7C10%7C15%7C20'
'%7C25%7C30%7C35%7C40%7C45%7C50%7C55%7C60%7C65%7C70%7C75%7C2%3A%7C'
'Duration%20in%20ms.&chdl=Run%201%7CRun%202'
)
self.assertEqual(results, expected)
def testMinimumDurationBarGraph(self):
fastest = ((nameserver.NameServer('208.67.220.220', name='OpenDNS'), 10.0),
(nameserver.NameServer('156.154.70.1', name='UltraDNS'), 15.75))
expected = (
'http://chart.apis.google.com/chart?chxt=y%2Cx%2Cx&chd=e%3AgAyZ&'
'chxp=2%2C9&chxr=1%2C0%2C20%7C2%2C-1.0%2C21.0&chxtc=1%2C-720'
'&chco=0000ff&chbh=a&chs=720x78&cht=bhg&chxl=0%3A%7CUltraDNS%7COpenDNS'
'%7C1%3A%7C0%7C3%7C6%7C9%7C12%7C15%7C18%7C20%7C2%3A%7C'
'Duration%20in%20ms.'
)
self.assertEquals(charts.MinimumDurationBarGraph(fastest), expected)
def testMaximumRunDuration(self):
runs_data = [
('G', [3.851, 4.7690, 423.971998, 189.674001, 14.477, 174.788001]),
('Y', [99.99, 488.88])
]
self.assertEquals(charts._MaximumRunDuration(runs_data), 488.88)
class DistributionChartTests(unittest.TestCase):
def testMakeCumulativeDistribution(self):
runs_data = _ExampleRunsData()
expected = [
(runs_data[0][0],
[(0, 0), (3.3333333333333335, 78.0),(26.666666666666668, 79.0),
(30.0, 80.0), (36.666666666666664, 81.0), (43.333333333333336, 82.0),
(46.666666666666664, 83.0), (53.333333333333336, 84.0),
(56.666666666666664, 85.0), (60.0, 86.0), (63.333333333333329, 89.0),
(66.666666666666657, 94.0), (70.0, 125.0), (73.333333333333329, 187.0),
(76.666666666666671, 191.0), (80.0, 261.0), (83.333333333333343, 420.0),
(86.666666666666671, 822.0), (90.0, 878.0), (93.333333333333329, 882.0),
(96.666666666666671, 1105.0), (100, 1890.0)]),
(runs_data[1][0],
[(0, 0), (30.0, 8.0), (50.0, 9.0), (53.333333333333336, 11.0),
(56.666666666666664, 13.0), (60.0, 27.0), (63.333333333333329, 52.0),
(66.666666666666657, 83.0), (70.0, 104.0), (73.333333333333329, 143.0),
(76.666666666666671, 174.0), (80.0, 312.0), (83.333333333333343, 320.0),
(86.666666666666671, 329.0), (90.0, 496.0), (93.333333333333329, 533.0),
(96.666666666666671, 594.0), (100, 773.0)])]
self.assertEquals(charts._MakeCumulativeDistribution(runs_data), expected)
def testDistributionLineGraph(self):
runs_data = _ExampleRunsData()
url = charts.DistributionLineGraph(runs_data, scale=350)
expected = (
'http://chart.apis.google.com/chart?cht=lxy&chs=720x410&chxt=x,y&'
'chg=10,20&chxr=0,0,350|1,0,100&chd=t:0,22,23,23,23,23,24,24,24,25,25'
',27,36,53,55,75,120|0,3,27,30,37,43,47,53,57,60,63,67,70,73,77,80,83|'
'0,2,3,3,4,8,15,24,30,41,50,89,91,94,142|0,30,50,53,57,60,63,67,70,73,'
'77,80,83,87,90&chco=ff9900,1a00ff&chxt=x,y,x,y&chxl=2:||Duration+in+ms'
'||3:||%25|&chdl=OpenDNS|UltraDNS'
)
self.assertTrue('0,3,27,30,37,43,47,53,57,60,63,67,70,73,77' in expected)
self.assertTrue('0,0,350|1,0,100' in expected)
self.assertEquals(url, expected)
if __name__ == '__main__':
unittest.main()
|
spark-test/spark | refs/heads/master | dev/create-release/translate-contributors.py | 104 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script translates invalid authors in the contributors list generated
# by generate-contributors.py. When the script encounters an author name that
# is considered invalid, it searches Github and JIRA in an attempt to find
# replacements. This tool runs in two modes:
#
# (1) Interactive mode: For each invalid author name, this script presents
# all candidate replacements to the user and awaits user response. In this
# mode, the user may also input a custom name. This is the default.
#
# (2) Non-interactive mode: For each invalid author name, this script replaces
# the name with the first valid candidate it can find. If there is none, it
# uses the original name. This can be enabled through the --non-interactive flag.
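#
# Illustrative invocation (a sketch; the exact paths are assumptions based on the
# comments above and the option parsing below, not taken from project docs):
#   ./generate-contributors.py            # produce the raw contributors list
#   ./translate-contributors.py           # interactive mode (the default)
#   ./translate-contributors.py --non-interactive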
import os
import sys
from releaseutils import *
# You must set the following before use!
JIRA_API_BASE = os.environ.get("JIRA_API_BASE", "https://issues.apache.org/jira")
JIRA_USERNAME = os.environ.get("JIRA_USERNAME", None)
JIRA_PASSWORD = os.environ.get("JIRA_PASSWORD", None)
GITHUB_API_TOKEN = os.environ.get("GITHUB_API_TOKEN", None)
if not JIRA_USERNAME or not JIRA_PASSWORD:
sys.exit("Both JIRA_USERNAME and JIRA_PASSWORD must be set")
if not GITHUB_API_TOKEN:
sys.exit("GITHUB_API_TOKEN must be set")
# Write new contributors list to <old_file_name>.final
if not os.path.isfile(contributors_file_name):
print("Contributors file %s does not exist!" % contributors_file_name)
print("Have you run ./generate-contributors.py yet?")
sys.exit(1)
contributors_file = open(contributors_file_name, "r")
warnings = []
# In non-interactive mode, this script will choose the first replacement that is valid
INTERACTIVE_MODE = True
if len(sys.argv) > 1:
options = set(sys.argv[1:])
if "--non-interactive" in options:
INTERACTIVE_MODE = False
if INTERACTIVE_MODE:
print("Running in interactive mode. To disable this, provide the --non-interactive flag.")
# Setup Github and JIRA clients
jira_options = {"server": JIRA_API_BASE}
jira_client = JIRA(options=jira_options, basic_auth=(JIRA_USERNAME, JIRA_PASSWORD))
github_client = Github(GITHUB_API_TOKEN)
# Load known author translations that are cached locally
known_translations = {}
known_translations_file_name = "known_translations"
known_translations_file = open(known_translations_file_name, "r")
for line in known_translations_file:
if line.startswith("#"):
continue
[old_name, new_name] = line.strip("\n").split(" - ")
known_translations[old_name] = new_name
known_translations_file.close()
# Open again in case the user adds new mappings
known_translations_file = open(known_translations_file_name, "a")
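# For reference, the file parsed above is plain text with one "old_name - new_name"
# mapping per line; lines starting with "#" are skipped. An illustrative entry
# (names reused from the example candidates documented below, not real data):
#   andrewor14 - Andrew Or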
# Generate candidates for the given author. This should only be called if the given author
# name does not represent a full name as this operation is somewhat expensive. Under the
# hood, it makes several calls to the Github and JIRA API servers to find the candidates.
#
# This returns a list of (candidate name, source) 2-tuples. E.g.
# [
# (NOT_FOUND, "No full name found for Github user andrewor14"),
# ("Andrew Or", "Full name of JIRA user andrewor14"),
# ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"),
# ("Andrew Ordall", "Full name of SPARK-1663 assignee andrewor14"),
# (NOT_FOUND, "No assignee found for SPARK-1763")
# ]
NOT_FOUND = "Not found"
def generate_candidates(author, issues):
candidates = []
# First check for full name of Github user
github_name = get_github_name(author, github_client)
if github_name:
candidates.append((github_name, "Full name of Github user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for Github user %s" % author))
# Then do the same for JIRA user
jira_name = get_jira_name(author, jira_client)
if jira_name:
candidates.append((jira_name, "Full name of JIRA user %s" % author))
else:
candidates.append((NOT_FOUND, "No full name found for JIRA user %s" % author))
# Then do the same for the assignee of each of the associated JIRAs
# Note that a given issue may not have an assignee, or the assignee may not have a full name
for issue in issues:
try:
jira_issue = jira_client.issue(issue)
except JIRAError as e:
# Do not exit just because an issue is not found!
if e.status_code == 404:
warnings.append("Issue %s not found!" % issue)
continue
raise e
jira_assignee = jira_issue.fields.assignee
if jira_assignee:
user_name = jira_assignee.name
display_name = jira_assignee.displayName
if display_name:
candidates.append(
(display_name, "Full name of %s assignee %s" % (issue, user_name)))
else:
candidates.append(
(NOT_FOUND, "No full name found for %s assignee %s" % (issue, user_name)))
else:
candidates.append((NOT_FOUND, "No assignee found for %s" % issue))
# Guard against special characters in candidate names
# Note that the candidate name may already be in unicode (JIRA returns this)
for i, (candidate, source) in enumerate(candidates):
try:
candidate = unicode(candidate, "UTF-8")
except TypeError:
# already in unicode
pass
candidate = unidecode.unidecode(candidate).strip()
candidates[i] = (candidate, source)
return candidates
# Translate each invalid author by searching for possible candidates from Github and JIRA
# In interactive mode, this script presents the user with a list of choices and has the user
# select from this list. Additionally, the user may also choose to enter a custom name.
# In non-interactive mode, this script picks the first valid author name from the candidates.
# If no such name exists, the original name is used (without the JIRA numbers).
print("\n========================== Translating contributor list ==========================")
lines = contributors_file.readlines()
contributions = []
for i, line in enumerate(lines):
# It is possible that a line in the contributor file only has the github name, e.g. yhuai.
# So, we need a strip() to remove the newline.
temp_author = line.strip(" * ").split(" -- ")[0].strip()
print("Processing author %s (%d/%d)" % (temp_author, i + 1, len(lines)))
if not temp_author:
error_msg = " ERROR: Expected the following format \" * <author> -- <contributions>\"\n"
error_msg += " ERROR: Actual = %s" % line
print(error_msg)
warnings.append(error_msg)
contributions.append(line)
continue
author = temp_author.split("/")[0]
# Use the local copy of known translations where possible
if author in known_translations:
line = line.replace(temp_author, known_translations[author])
elif not is_valid_author(author):
new_author = author
issues = temp_author.split("/")[1:]
candidates = generate_candidates(author, issues)
# Print out potential replacement candidates along with the sources, e.g.
# [X] No full name found for Github user andrewor14
# [X] No assignee found for SPARK-1763
# [0] Andrew Or - Full name of JIRA user andrewor14
# [1] Andrew Orso - Full name of SPARK-1444 assignee andrewor14
# [2] Andrew Ordall - Full name of SPARK-1663 assignee andrewor14
# [3] andrewor14 - Raw Github username
# [4] Custom
candidate_names = []
bad_prompts = [] # Prompts that can't actually be selected; print these first.
good_prompts = [] # Prompts that contain valid choices
for candidate, source in candidates:
if candidate == NOT_FOUND:
bad_prompts.append(" [X] %s" % source)
else:
index = len(candidate_names)
candidate_names.append(candidate)
good_prompts.append(" [%d] %s - %s" % (index, candidate, source))
raw_index = len(candidate_names)
custom_index = len(candidate_names) + 1
for p in bad_prompts:
print(p)
if bad_prompts:
print(" ---")
for p in good_prompts:
print(p)
# In interactive mode, additionally provide "custom" option and await user response
if INTERACTIVE_MODE:
print(" [%d] %s - Raw Github username" % (raw_index, author))
print(" [%d] Custom" % custom_index)
response = raw_input(" Your choice: ")
last_index = custom_index
while not response.isdigit() or int(response) > last_index:
response = raw_input(" Please enter an integer between 0 and %d: " % last_index)
response = int(response)
if response == custom_index:
new_author = raw_input(" Please type a custom name for this author: ")
elif response != raw_index:
new_author = candidate_names[response]
# In non-interactive mode, just pick the first candidate
else:
valid_candidate_names = [name for name, _ in candidates
if is_valid_author(name) and name != NOT_FOUND]
if valid_candidate_names:
new_author = valid_candidate_names[0]
# Finally, capitalize the author and replace the original one with it
# If the final replacement is still invalid, log a warning
if is_valid_author(new_author):
new_author = capitalize_author(new_author)
else:
warnings.append(
"Unable to find a valid name %s for author %s" % (author, temp_author))
print(" * Replacing %s with %s" % (author, new_author))
# If we are in interactive mode, prompt the user whether we want to remember this new
# mapping
if INTERACTIVE_MODE and \
author not in known_translations and \
yesOrNoPrompt(
" Add mapping %s -> %s to known translations file?" % (author, new_author)):
known_translations_file.write("%s - %s\n" % (author, new_author))
known_translations_file.flush()
        line = line.replace(temp_author, new_author)
contributions.append(line)
print("==================================================================================\n")
contributors_file.close()
known_translations_file.close()
# Sort the contributions before writing them to the new file.
# Additionally, check if there are any duplicate author rows.
# This could happen if the same user has both a valid full
# name (e.g. Andrew Or) and an invalid one (andrewor14).
# If so, warn the user about this at the end.
contributions.sort()
all_authors = set()
new_contributors_file_name = contributors_file_name + ".final"
new_contributors_file = open(new_contributors_file_name, "w")
for line in contributions:
author = line.strip(" * ").split(" -- ")[0]
if author in all_authors:
warnings.append("Detected duplicate author name %s. Please merge these manually." % author)
all_authors.add(author)
new_contributors_file.write(line)
new_contributors_file.close()
print("Translated contributors list successfully written to %s!" % new_contributors_file_name)
# Log any warnings encountered in the process
if warnings:
print("\n========== Warnings encountered while translating the contributor list ===========")
for w in warnings:
print(w)
print("Please manually correct these in the final contributors list at %s." %
new_contributors_file_name)
print("==================================================================================\n")
|
thaumos/ansible | refs/heads/devel | test/units/modules/network/netscaler/test_netscaler_lb_vserver.py | 68 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerLBVServerModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
cls.server_mock = MagicMock()
cls.server_mock.__class__ = MagicMock(add=Mock())
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding.sslvserver_sslcertkey_binding': m,
}
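        # Patching sys.modules with these MagicMock entries lets the module under test
        # import the nitro SDK packages even when the real SDK is not installed; the
        # ImportError test below stops these patchers to simulate a missing SDK.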
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
super(TestNetscalerLBVServerModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
super(TestNetscalerLBVServerModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_lb_vserver
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.nitro_exception', MockException):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
feature_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
feature_mock.assert_called_with(client_mock, 'LB')
def test_ensure_feature_is_enabled_nitro_exception_caught(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
errorcode = 10
message = 'mock error'
class MockException(Exception):
def __init__(self):
self.errorcode = errorcode
self.message = message
feature_mock = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
nitro_exception=MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
expected_msg = 'nitro exception errorcode=%s, message=%s' % (errorcode, message)
self.assertEqual(result['msg'], expected_msg, 'Failed to handle nitro exception')
def test_create_new_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[False, True]),
lb_vserver_identical=Mock(side_effect=[True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'])
def test_update_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=Mock(return_value=[]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'])
def test_service_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, True]),
get_configured_service_bindings=Mock(return_value=configured_dict),
get_actual_service_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_servicegroup_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
get_configured_servicegroup_bindings=Mock(return_value=configured_dict),
get_actual_servicegroup_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_ssl_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
servicetype='SSL',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.exited()
self.assertTrue(len(ssl_sync_mock.mock_calls) > 0, msg='ssl cert_key bindings not called')
self.assertTrue(result['changed'])
def test_ssl_bindings_not_called_for_non_ssl_service(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
servicetype='HTTP',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
ssl_sync_mock.assert_not_called()
self.assertTrue(result['changed'])
def test_server_exists_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[False, False]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Did not create lb vserver')
def test_server_identical_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, False]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'lb vserver is not configured correctly')
def test_service_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, False]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'service bindings are not identical')
def test_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_server_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_absent_state_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, False]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'])
def test_absent_state_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertEqual(result['msg'], 'lb vserver still exists')
def test_disabled_state_change_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
do_state_change_mock = Mock(return_value=Mock(errorcode=0))
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
do_state_change=do_state_change_mock,
):
self.module = netscaler_lb_vserver
self.exited()
self.assertTrue(len(do_state_change_mock.mock_calls) > 0, msg='Did not call state change')
def test_get_immutables_failure(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
m = Mock(return_value=['some'])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(
result['msg'].startswith('Cannot update immutable attributes'),
msg='Did not handle immutables error correctly',
)
|
elkingtonmcb/scikit-learn | refs/heads/master | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
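# Illustrative check (not part of the original example): four shifted copies are
# appended to the original data, so the returned arrays are 5x longer, e.g.
#   X_big, Y_big = nudge_dataset(X, Y)
#   assert X_big.shape[0] == 5 * X.shape[0]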
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
dcroc16/skunk_works | refs/heads/master | google_appengine/lib/django-1.5/django/views/generic/dates.py | 107 | from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin
from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin
class YearMixin(object):
"""
Mixin for views manipulating year-based data.
"""
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"""
Return the year for which this view should display data.
"""
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_("No year specified"))
return year
def get_next_year(self, date):
"""
Get the next valid year.
"""
return _get_next_prev(self, date, is_previous=False, period='year')
def get_previous_year(self, date):
"""
Get the previous valid year.
"""
return _get_next_prev(self, date, is_previous=True, period='year')
def _get_next_year(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date.replace(year=date.year + 1, month=1, day=1)
def _get_current_year(self, date):
"""
Return the start date of the current interval.
"""
return date.replace(month=1, day=1)
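    # Illustrative example (not part of the original module): for date(2013, 5, 17),
    # _get_current_year() returns date(2013, 1, 1) and _get_next_year() returns
    # date(2014, 1, 1), giving the interval 2013-01-01 <= item date < 2014-01-01.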
class MonthMixin(object):
"""
Mixin for views manipulating month-based data.
"""
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""
Return the month for which this view should display data.
"""
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""
Get the next valid month.
"""
return _get_next_prev(self, date, is_previous=False, period='month')
def get_previous_month(self, date):
"""
Get the previous valid month.
"""
return _get_next_prev(self, date, is_previous=True, period='month')
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
return date.replace(year=date.year + 1, month=1, day=1)
else:
return date.replace(month=date.month + 1, day=1)
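    # Illustrative example (not part of the original module): December wraps into the
    # next year, so _get_next_month(date(2013, 12, 5)) returns date(2014, 1, 1), while
    # _get_next_month(date(2013, 5, 5)) returns date(2013, 6, 1).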
def _get_current_month(self, date):
"""
        Return the start date of the current interval.
"""
return date.replace(day=1)
class DayMixin(object):
"""
Mixin for views manipulating day-based data.
"""
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""
Return the day for which this view should display data.
"""
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""
Get the next valid day.
"""
return _get_next_prev(self, date, is_previous=False, period='day')
def get_previous_day(self, date):
"""
Get the previous valid day.
"""
return _get_next_prev(self, date, is_previous=True, period='day')
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""
Return the start date of the current interval.
"""
return date
class WeekMixin(object):
"""
Mixin for views manipulating week-based data.
"""
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"""
Return the week for which this view should display data
"""
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_("No week specified"))
return week
def get_next_week(self, date):
"""
Get the next valid week.
"""
return _get_next_prev(self, date, is_previous=False, period='week')
def get_previous_week(self, date):
"""
Get the previous valid week.
"""
return _get_next_prev(self, date, is_previous=True, period='week')
def _get_next_week(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=7 - self._get_weekday(date))
def _get_current_week(self, date):
"""
Return the start date of the current interval.
"""
return date - datetime.timedelta(self._get_weekday(date))
def _get_weekday(self, date):
"""
Return the weekday for a given date.
The first day according to the week format is 0 and the last day is 6.
"""
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
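    # Illustrative example (not part of the original module): for a Wednesday,
    # date.weekday() is 2, so _get_weekday() returns 2 under '%W' (Monday-based weeks)
    # and (2 + 1) % 7 == 3 under '%U' (Sunday-based weeks); a Sunday gives 6 and 0
    # respectively.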
class DateMixin(object):
"""
Mixin class for views manipulating date-based data.
"""
date_field = None
allow_future = False
def get_date_field(self):
"""
Get the name of the date field to be used to filter by.
"""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Returns `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
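    # Illustrative example (not part of the original module; 'pub_date' is a
    # hypothetical field name): for a DateTimeField, a lookup on date(2013, 3, 5)
    # becomes {'pub_date__gte': <2013-03-05 00:00:00>, 'pub_date__lt': <2013-03-06
    # 00:00:00>}, whereas for a plain DateField it is just {'pub_date': date(2013, 3, 5)}.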
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""
Abstract base class for date-based views displaying a list of objects.
"""
allow_empty = False
date_list_period = 'year'
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(object_list=self.object_list,
date_list=self.date_list)
context.update(extra_context)
return self.render_to_response(context)
def get_dated_items(self):
"""
Obtain the list of dates and items.
"""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_dated_queryset(self, ordering=None, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if ordering is not None:
qs = qs.order_by(ordering)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else timezone_today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': force_text(qs.model._meta.verbose_name_plural)
})
return qs
def get_date_list_period(self):
"""
Get the aggregation period for the list of dates: 'year', 'month', or 'day'.
"""
return self.date_list_period
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
"""
Get a date list by calling `queryset.dates()`, checking along the way
for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
if date_type is None:
date_type = self.get_date_list_period()
date_list = queryset.dates(date_field, date_type, ordering)
if date_list is not None and not date_list and not allow_empty:
name = force_text(queryset.model._meta.verbose_name_plural)
raise Http404(_("No %(verbose_name_plural)s available") %
{'verbose_name_plural': name})
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items.
Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
qs = self.get_dated_queryset(ordering='-%s' % self.get_date_field())
date_list = self.get_date_list(qs, ordering='DESC')
if not date_list:
qs = qs.none()
return (date_list, qs, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""
Top-level archive of date-based items.
"""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""
List of objects published in a given year.
"""
date_list_period = 'month'
make_object_list = False
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(ordering='-%s' % date_field, **lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (date_list, qs, {
'year': date,
'next_year': self.get_next_year(date),
'previous_year': self.get_previous_year(date),
})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""
List of objects published in a given year.
"""
template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""
    List of objects published in a given month.
"""
date_list_period = 'day'
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_month(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""
    List of objects published in a given month.
"""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""
List of objects published in a given week.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_week(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""
List of objects published in a given week.
"""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""
List of objects published on a given day.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""
List of objects published on a given day.
"""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""
List of objects published today.
"""
def get_dated_items(self):
"""
Return (date_list, items, extra_context) for this request.
"""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""
List of objects published today.
"""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""
Get the object this request displays.
"""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = queryset or self.get_queryset()
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_("Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
})
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super(BaseDetailView, self).get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
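# Illustrative sketch (not part of Django): a hypothetical URLconf entry wiring
# DateDetailView to a made-up ``Article`` model, using the old-style url()
# patterns that match this version of Django and the default '%b' month format:
#
#     url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
#         DateDetailView.as_view(model=Article, date_field='pub_date'),
#         name='archive_date_detail'),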
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Helper: get a datetime.date object given a format string and a year,
month, and day (only year is mandatory). Raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
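# Illustrative sketch (not part of Django): with year='2012', year_format='%Y',
# month='oct', month_format='%b', day='15', day_format='%d', the helper builds
# format '%Y__%b__%d' and datestr '2012__oct__15', and strptime() yields
# datetime.date(2012, 10, 15); an impossible string such as '2012__feb__30'
# raises ValueError and therefore Http404.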
def _get_next_prev(generic_view, date, is_previous, period):
"""
Helper: Get the next or the previous valid date. The idea is to allow
links on month/day views to never be 404s by never providing a date
that'll be invalid for the given view.
This is a bit complicated since it handles different intervals of time,
hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day/week/month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive result
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
get_current = getattr(generic_view, '_get_current_%s' % period)
get_next = getattr(generic_view, '_get_next_%s' % period)
# Bounds of the current interval
start, end = get_current(date), get_next(date)
# If allow_empty is True, the naive result will be valid
if allow_empty:
if is_previous:
result = get_current(start - datetime.timedelta(days=1))
else:
result = end
if allow_future or result <= timezone_today():
return result
else:
return None
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
ordering = date_field
# Filter out objects in the future if appropriate.
if not allow_future:
# Fortunately, to match the implementation of allow_future,
# we need __lte, which doesn't conflict with __lt above.
if generic_view.uses_datetime_field:
now = timezone.now()
else:
now = timezone_today()
lookup['%s__lte' % date_field] = now
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
return None
# Convert datetimes to dates in the current time zone.
if generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
# Return the first day of the period.
return get_current(result)
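# Illustrative sketch (not part of Django): with period='month', allow_empty=True
# and date=datetime.date(2012, 5, 3), get_current/get_next give the bounds
# 2012-05-01 and 2012-06-01. For a "previous" link the helper evaluates
# get_current(2012-05-01 - one day) == datetime.date(2012, 4, 1), which is
# returned as long as it is not in the future (or allow_future is True).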
def timezone_today():
"""
Return the current date in the current time zone.
"""
if settings.USE_TZ:
return timezone.localtime(timezone.now()).date()
else:
return datetime.date.today()
|
gangadharkadam/saloon_frappe_install | refs/heads/master | frappe/email/__init__.py | 27 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.email.email_body import get_email
from frappe.email.smtp import send
def sendmail_md(recipients, sender=None, msg=None, subject=None, attachments=None, content=None,
reply_to=None, cc=(), message_id=None):
"""send markdown email"""
import markdown2
sendmail(recipients, sender, markdown2.markdown(content or msg), subject, attachments, reply_to=reply_to, cc=cc)
def sendmail(recipients, sender='', msg='', subject='[No Subject]', attachments=None, content=None,
reply_to=None, cc=(), message_id=None):
"""send an html email as multipart with attachments and all"""
mail = get_email(recipients, sender, content or msg, subject, attachments=attachments, reply_to=reply_to, cc=cc)
if message_id:
mail.set_message_id(message_id)
send(mail)
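# Illustrative sketch (not part of frappe): a minimal call, with made-up
# addresses and body:
#
#     sendmail(["jane@example.com"], sender="noreply@example.com",
#              subject="Weekly report", content="<p>The report is ready.</p>")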
def sendmail_to_system_managers(subject, content):
send(get_email(get_system_managers(), None, content, subject))
@frappe.whitelist()
def get_contact_list():
"""Returns contacts (from autosuggest)"""
cond = ['`%s` like "%s%%"' % (f,
frappe.form_dict.get('txt')) for f in frappe.form_dict.get('where').split(',')]
cl = frappe.db.sql("select `%s` from `tab%s` where %s" % (
frappe.form_dict.get('select')
,frappe.form_dict.get('from')
,' OR '.join(cond)
)
)
frappe.response['cl'] = filter(None, [c[0] for c in cl])
def get_system_managers():
return frappe.db.sql_list("""select parent FROM tabUserRole
WHERE role='System Manager'
AND parent!='Administrator'
AND parent IN (SELECT email FROM tabUser WHERE enabled=1)""")
|
dqnykamp/sympy | refs/heads/master | sympy/combinatorics/tests/test_permutations.py | 25 | from itertools import permutations
from sympy.combinatorics.permutations import (Permutation, _af_parity,
_af_rmul, _af_rmuln, Cycle)
from sympy.utilities.pytest import raises
rmul = Permutation.rmul
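# Note (illustrative, not part of the original test suite): Permutation.rmul
# composes right to left, i.e. rmul(p, q)(i) == p(q(i)), so q is applied first.
# For example, with p = Permutation([1, 0, 2]) and q = Permutation([0, 2, 1]),
# rmul(p, q) == Permutation([1, 2, 0]).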
def test_Permutation():
# don't auto fill 0
raises(ValueError, lambda: Permutation([1]))
p = Permutation([0, 1, 2, 3])
# call as bijective
assert [p(i) for i in range(p.size)] == list(p)
# call as operator
assert p(list(range(p.size))) == list(p)
# call as function
assert list(p(1, 2)) == [0, 2, 1, 3]
# conversion to list
assert list(p) == list(range(4))
assert Permutation(size=4) == Permutation(3)
assert Permutation(Permutation(3), size=5) == Permutation(4)
# cycle form with size
assert Permutation([[1, 2]], size=4) == Permutation([[1, 2], [0], [3]])
# random generation
assert Permutation.random(2) in (Permutation([1, 0]), Permutation([0, 1]))
p = Permutation([2, 5, 1, 6, 3, 0, 4])
q = Permutation([[1], [0, 3, 5, 6, 2, 4]])
assert len(set([p, p])) == 1
r = Permutation([1, 3, 2, 0, 4, 6, 5])
ans = Permutation(_af_rmuln(*[w.array_form for w in (p, q, r)])).array_form
assert rmul(p, q, r).array_form == ans
# make sure no other permutation of p, q, r could have given
# that answer
for a, b, c in permutations((p, q, r)):
if (a, b, c) == (p, q, r):
continue
assert rmul(a, b, c).array_form != ans
assert p.support() == list(range(7))
assert q.support() == [0, 2, 3, 4, 5, 6]
assert Permutation(p.cyclic_form).array_form == p.array_form
assert p.cardinality == 5040
assert q.cardinality == 5040
assert q.cycles == 2
assert rmul(q, p) == Permutation([4, 6, 1, 2, 5, 3, 0])
assert rmul(p, q) == Permutation([6, 5, 3, 0, 2, 4, 1])
assert _af_rmul(p.array_form, q.array_form) == \
[6, 5, 3, 0, 2, 4, 1]
assert rmul(Permutation([[1, 2, 3], [0, 4]]),
Permutation([[1, 2, 4], [0], [3]])).cyclic_form == \
[[0, 4, 2], [1, 3]]
assert q.array_form == [3, 1, 4, 5, 0, 6, 2]
assert q.cyclic_form == [[0, 3, 5, 6, 2, 4]]
assert q.full_cyclic_form == [[0, 3, 5, 6, 2, 4], [1]]
assert p.cyclic_form == [[0, 2, 1, 5], [3, 6, 4]]
t = p.transpositions()
assert t == [(0, 5), (0, 1), (0, 2), (3, 4), (3, 6)]
assert Permutation.rmul(*[Permutation(Cycle(*ti)) for ti in (t)])
assert Permutation([1, 0]).transpositions() == [(0, 1)]
assert p**13 == p
assert q**0 == Permutation(list(range(q.size)))
assert q**-2 == ~q**2
assert q**2 == Permutation([5, 1, 0, 6, 3, 2, 4])
assert q**3 == q**2*q
assert q**4 == q**2*q**2
a = Permutation(1, 3)
b = Permutation(2, 0, 3)
I = Permutation(3)
assert ~a == a**-1
assert a*~a == I
assert a*b**-1 == a*~b
ans = Permutation(0, 5, 3, 1, 6)(2, 4)
assert (p + q.rank()).rank() == ans.rank()
assert (p + q.rank())._rank == ans.rank()
assert (q + p.rank()).rank() == ans.rank()
raises(TypeError, lambda: p + Permutation(list(range(10))))
assert (p - q.rank()).rank() == Permutation(0, 6, 3, 1, 2, 5, 4).rank()
assert p.rank() - q.rank() < 0 # for coverage: make sure mod is used
assert (q - p.rank()).rank() == Permutation(1, 4, 6, 2)(3, 5).rank()
assert p*q == Permutation(_af_rmuln(*[list(w) for w in (q, p)]))
assert p*Permutation([]) == p
assert Permutation([])*p == p
assert p*Permutation([[0, 1]]) == Permutation([2, 5, 0, 6, 3, 1, 4])
assert Permutation([[0, 1]])*p == Permutation([5, 2, 1, 6, 3, 0, 4])
pq = p ^ q
assert pq == Permutation([5, 6, 0, 4, 1, 2, 3])
assert pq == rmul(q, p, ~q)
qp = q ^ p
assert qp == Permutation([4, 3, 6, 2, 1, 5, 0])
assert qp == rmul(p, q, ~p)
raises(ValueError, lambda: p ^ Permutation([]))
assert p.commutator(q) == Permutation(0, 1, 3, 4, 6, 5, 2)
assert q.commutator(p) == Permutation(0, 2, 5, 6, 4, 3, 1)
assert p.commutator(q) == ~q.commutator(p)
raises(ValueError, lambda: p.commutator(Permutation([])))
assert len(p.atoms()) == 7
assert q.atoms() == set([0, 1, 2, 3, 4, 5, 6])
assert p.inversion_vector() == [2, 4, 1, 3, 1, 0]
assert q.inversion_vector() == [3, 1, 2, 2, 0, 1]
assert Permutation.from_inversion_vector(p.inversion_vector()) == p
assert Permutation.from_inversion_vector(q.inversion_vector()).array_form\
== q.array_form
raises(ValueError, lambda: Permutation.from_inversion_vector([0, 2]))
assert Permutation([i for i in range(500, -1, -1)]).inversions() == 125250
s = Permutation([0, 4, 1, 3, 2])
assert s.parity() == 0
_ = s.cyclic_form # needed to create a value for _cyclic_form
assert len(s._cyclic_form) != s.size and s.parity() == 0
assert not s.is_odd
assert s.is_even
assert Permutation([0, 1, 4, 3, 2]).parity() == 1
assert _af_parity([0, 4, 1, 3, 2]) == 0
assert _af_parity([0, 1, 4, 3, 2]) == 1
s = Permutation([0])
assert s.is_Singleton
assert Permutation([]).is_Empty
r = Permutation([3, 2, 1, 0])
assert (r**2).is_Identity
assert rmul(~p, p).is_Identity
assert (~p)**13 == Permutation([5, 2, 0, 4, 6, 1, 3])
assert ~(r**2).is_Identity
assert p.max() == 6
assert p.min() == 0
q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
assert q.max() == 4
assert q.min() == 0
p = Permutation([1, 5, 2, 0, 3, 6, 4])
q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
assert p.ascents() == [0, 3, 4]
assert q.ascents() == [1, 2, 4]
assert r.ascents() == []
assert p.descents() == [1, 2, 5]
assert q.descents() == [0, 3, 5]
assert Permutation(r.descents()).is_Identity
assert p.inversions() == 7
# test the merge-sort with a longer permutation
big = list(p) + list(range(p.max() + 1, p.max() + 130))
assert Permutation(big).inversions() == 7
assert p.signature() == -1
assert q.inversions() == 11
assert q.signature() == -1
assert rmul(p, ~p).inversions() == 0
assert rmul(p, ~p).signature() == 1
assert p.order() == 6
assert q.order() == 10
assert (p**(p.order())).is_Identity
assert p.length() == 6
assert q.length() == 7
assert r.length() == 4
assert p.runs() == [[1, 5], [2], [0, 3, 6], [4]]
assert q.runs() == [[4], [2, 3, 5], [0, 6], [1]]
assert r.runs() == [[3], [2], [1], [0]]
assert p.index() == 8
assert q.index() == 8
assert r.index() == 3
assert p.get_precedence_distance(q) == q.get_precedence_distance(p)
assert p.get_adjacency_distance(q) == p.get_adjacency_distance(q)
assert p.get_positional_distance(q) == p.get_positional_distance(q)
p = Permutation([0, 1, 2, 3])
q = Permutation([3, 2, 1, 0])
assert p.get_precedence_distance(q) == 6
assert p.get_adjacency_distance(q) == 3
assert p.get_positional_distance(q) == 8
p = Permutation([0, 3, 1, 2, 4])
q = Permutation.josephus(4, 5, 2)
assert p.get_adjacency_distance(q) == 3
raises(ValueError, lambda: p.get_adjacency_distance(Permutation([])))
raises(ValueError, lambda: p.get_positional_distance(Permutation([])))
raises(ValueError, lambda: p.get_precedence_distance(Permutation([])))
a = [Permutation.unrank_nonlex(4, i) for i in range(5)]
iden = Permutation([0, 1, 2, 3])
for i in range(5):
for j in range(i + 1, 5):
assert a[i].commutes_with(a[j]) == \
(rmul(a[i], a[j]) == rmul(a[j], a[i]))
if a[i].commutes_with(a[j]):
assert a[i].commutator(a[j]) == iden
assert a[j].commutator(a[i]) == iden
a = Permutation(3)
b = Permutation(0, 6, 3)(1, 2)
assert a.cycle_structure == {1: 4}
assert b.cycle_structure == {2: 1, 3: 1, 1: 2}
def test_josephus():
assert Permutation.josephus(4, 6, 1) == Permutation([3, 1, 0, 2, 5, 4])
assert Permutation.josephus(1, 5, 1).is_Identity
def test_ranking():
assert Permutation.unrank_lex(5, 10).rank() == 10
p = Permutation.unrank_lex(15, 225)
assert p.rank() == 225
p1 = p.next_lex()
assert p1.rank() == 226
assert Permutation.unrank_lex(15, 225).rank() == 225
assert Permutation.unrank_lex(10, 0).is_Identity
p = Permutation.unrank_lex(4, 23)
assert p.rank() == 23
assert p.array_form == [3, 2, 1, 0]
assert p.next_lex() is None
p = Permutation([1, 5, 2, 0, 3, 6, 4])
q = Permutation([[1, 2, 3, 5, 6], [0, 4]])
a = [Permutation.unrank_trotterjohnson(4, i).array_form for i in range(5)]
assert a == [[0, 1, 2, 3], [0, 1, 3, 2], [0, 3, 1, 2], [3, 0, 1,
2], [3, 0, 2, 1] ]
assert [Permutation(pa).rank_trotterjohnson() for pa in a] == list(range(5))
assert Permutation([0, 1, 2, 3]).next_trotterjohnson() == \
Permutation([0, 1, 3, 2])
assert q.rank_trotterjohnson() == 2283
assert p.rank_trotterjohnson() == 3389
assert Permutation([1, 0]).rank_trotterjohnson() == 1
a = Permutation(list(range(3)))
b = a
l = []
tj = []
for i in range(6):
l.append(a)
tj.append(b)
a = a.next_lex()
b = b.next_trotterjohnson()
assert a == b is None
assert set([tuple(a) for a in l]) == set([tuple(a) for a in tj])
p = Permutation([2, 5, 1, 6, 3, 0, 4])
q = Permutation([[6], [5], [0, 1, 2, 3, 4]])
assert p.rank() == 1964
assert q.rank() == 870
assert Permutation([]).rank_nonlex() == 0
prank = p.rank_nonlex()
assert prank == 1600
assert Permutation.unrank_nonlex(7, 1600) == p
qrank = q.rank_nonlex()
assert qrank == 41
assert Permutation.unrank_nonlex(7, 41) == Permutation(q.array_form)
a = [Permutation.unrank_nonlex(4, i).array_form for i in range(24)]
assert a == [
[1, 2, 3, 0], [3, 2, 0, 1], [1, 3, 0, 2], [1, 2, 0, 3], [2, 3, 1, 0],
[2, 0, 3, 1], [3, 0, 1, 2], [2, 0, 1, 3], [1, 3, 2, 0], [3, 0, 2, 1],
[1, 0, 3, 2], [1, 0, 2, 3], [2, 1, 3, 0], [2, 3, 0, 1], [3, 1, 0, 2],
[2, 1, 0, 3], [3, 2, 1, 0], [0, 2, 3, 1], [0, 3, 1, 2], [0, 2, 1, 3],
[3, 1, 2, 0], [0, 3, 2, 1], [0, 1, 3, 2], [0, 1, 2, 3]]
N = 10
p1 = Permutation(a[0])
for i in range(1, N+1):
p1 = p1*Permutation(a[i])
p2 = Permutation.rmul_with_af(*[Permutation(h) for h in a[N::-1]])
assert p1 == p2
ok = []
p = Permutation([1, 0])
for i in range(3):
ok.append(p.array_form)
p = p.next_nonlex()
if p is None:
ok.append(None)
break
assert ok == [[1, 0], [0, 1], None]
assert Permutation([3, 2, 0, 1]).next_nonlex() == Permutation([1, 3, 0, 2])
assert [Permutation(pa).rank_nonlex() for pa in a] == list(range(24))
def test_mul():
a, b = [0, 2, 1, 3], [0, 1, 3, 2]
assert _af_rmul(a, b) == [0, 2, 3, 1]
assert _af_rmuln(a, b, list(range(4))) == [0, 2, 3, 1]
assert rmul(Permutation(a), Permutation(b)).array_form == [0, 2, 3, 1]
a = Permutation([0, 2, 1, 3])
b = (0, 1, 3, 2)
c = (3, 1, 2, 0)
assert Permutation.rmul(a, b, c) == Permutation([1, 2, 3, 0])
assert Permutation.rmul(a, c) == Permutation([3, 2, 1, 0])
raises(TypeError, lambda: Permutation.rmul(b, c))
n = 6
m = 8
a = [Permutation.unrank_nonlex(n, i).array_form for i in range(m)]
h = list(range(n))
for i in range(m):
h = _af_rmul(h, a[i])
h2 = _af_rmuln(*a[:i + 1])
assert h == h2
def test_args():
p = Permutation([(0, 3, 1, 2), (4, 5)])
assert p._cyclic_form is None
assert Permutation(p) == p
assert p.cyclic_form == [[0, 3, 1, 2], [4, 5]]
assert p._array_form == [3, 2, 0, 1, 5, 4]
p = Permutation((0, 3, 1, 2))
assert p._cyclic_form is None
assert p._array_form == [0, 3, 1, 2]
assert Permutation([0]) == Permutation((0, ))
assert Permutation([[0], [1]]) == Permutation(((0, ), (1, ))) == \
Permutation(((0, ), [1]))
assert Permutation([[1, 2]]) == Permutation([0, 2, 1])
assert Permutation([[1], [4, 2]]) == Permutation([0, 1, 4, 3, 2])
assert Permutation([[1], [4, 2]], size=1) == Permutation([0, 1, 4, 3, 2])
assert Permutation(
[[1], [4, 2]], size=6) == Permutation([0, 1, 4, 3, 2, 5])
assert Permutation([], size=3) == Permutation([0, 1, 2])
assert Permutation(3).list(5) == [0, 1, 2, 3, 4]
assert Permutation(3).list(-1) == []
assert Permutation(5)(1, 2).list(-1) == [0, 2, 1]
assert Permutation(5)(1, 2).list() == [0, 2, 1, 3, 4, 5]
raises(TypeError, lambda: Permutation([1, 2], [0]))
# enclosing brackets needed
raises(ValueError, lambda: Permutation([[1, 2], 0]))
# enclosing brackets needed on 0
raises(ValueError, lambda: Permutation([1, 1, 0]))
raises(ValueError, lambda: Permutation([[1], [1, 2]]))
raises(ValueError, lambda: Permutation([4, 5], size=10)) # where are 0-3?
# but this is ok because cycles imply that only those listed moved
assert Permutation(4, 5) == Permutation([0, 1, 2, 3, 5, 4])
def test_Cycle():
assert str(Cycle()) == 'Cycle()'
assert Cycle(Cycle(1,2)) == Cycle(1, 2)
assert Cycle(1,2).copy() == Cycle(1,2)
assert list(Cycle(1, 3, 2)) == [0, 3, 1, 2]
assert Cycle(1, 2)(2, 3) == Cycle(1, 3, 2)
assert Cycle(1, 2)(2, 3)(4, 5) == Cycle(1, 3, 2)(4, 5)
assert Permutation(Cycle(1, 2)(2, 1, 0, 3)).cyclic_form, Cycle(0, 2, 1)
raises(ValueError, lambda: Cycle().list())
assert Cycle(1, 2).list() == [0, 2, 1]
assert Cycle(1, 2).list(4) == [0, 2, 1, 3]
assert Permutation(Cycle(1, 2), size=4) == \
Permutation([0, 2, 1, 3])
assert str(Cycle(1, 2)(4, 5)) == 'Cycle(1, 2)(4, 5)'
assert str(Cycle(1, 2)) == 'Cycle(1, 2)'
assert Cycle(Permutation(list(range(3)))) == Cycle()
assert Cycle(1, 2).list() == [0, 2, 1]
assert Cycle(1, 2).list(4) == [0, 2, 1, 3]
raises(TypeError, lambda: Cycle((1, 2)))
raises(ValueError, lambda: Cycle(1, 2, 1))
raises(TypeError, lambda: Cycle(1, 2)*{})
# check round-trip
p = Permutation([[1, 2], [4, 3]], size=5)
assert Permutation(Cycle(p)) == p
def test_from_sequence():
assert Permutation.from_sequence('SymPy') == Permutation(4)(0, 1, 3)
assert Permutation.from_sequence('SymPy', key=lambda x: x.lower()) == \
Permutation(4)(0, 2)(1, 3)
|
amarouni/incubator-beam | refs/heads/master | sdks/python/apache_beam/examples/complete/tfidf.py | 7 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A TF-IDF workflow (term frequency - inverse document frequency).
For an explanation of the TF-IDF algorithm see the following link:
http://en.wikipedia.org/wiki/Tf-idf
"""
from __future__ import absolute_import
import argparse
import glob
import math
import re
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.pvalue import AsSingleton
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
def read_documents(pipeline, uris):
"""Read the documents at the provided uris and returns (uri, line) pairs."""
pcolls = []
for uri in uris:
pcolls.append(
pipeline
| 'Read: %s' % uri >> ReadFromText(uri)
| 'WithKey: %s' % uri >> beam.Map(lambda v, uri: (uri, v), uri))
return pcolls | 'FlattenReadPColls' >> beam.Flatten()
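# Illustrative sketch (not part of the example): for two hypothetical inputs
# gs://bucket/a.txt and gs://bucket/b.txt the returned PCollection holds pairs
# such as ('gs://bucket/a.txt', 'first line of a') and ('gs://bucket/b.txt', ...),
# one element per line of each document, keyed by the uri it came from.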
class TfIdf(beam.PTransform):
"""A transform containing a basic TF-IDF pipeline.
The input consists of KV objects where the key is the document's URI and
the value is a piece of the document's content.
The output is mapping from terms to scores for each document URI.
"""
def expand(self, uri_to_content):
# Compute the total number of documents, and prepare a singleton
# PCollection to use as side input.
total_documents = (
uri_to_content
| 'GetUris 1' >> beam.Keys()
| 'GetUniqueUris' >> beam.RemoveDuplicates()
| 'CountUris' >> beam.combiners.Count.Globally())
# Create a collection of pairs mapping a URI to each of the words
# in the document associated with that URI.
def split_into_words((uri, line)):
return [(uri, w.lower()) for w in re.findall(r'[A-Za-z\']+', line)]
uri_to_words = (
uri_to_content
| 'SplitWords' >> beam.FlatMap(split_into_words))
# Compute a mapping from each word to the total number of documents
# in which it appears.
word_to_doc_count = (
uri_to_words
| 'GetUniqueWordsPerDoc' >> beam.RemoveDuplicates()
| 'GetWords' >> beam.Values()
| 'CountDocsPerWord' >> beam.combiners.Count.PerElement())
# Compute a mapping from each URI to the total number of words in the
# document associated with that URI.
uri_to_word_total = (
uri_to_words
| 'GetUris 2' >> beam.Keys()
| 'CountWordsInDoc' >> beam.combiners.Count.PerElement())
# Count, for each (URI, word) pair, the number of occurrences of that word
# in the document associated with the URI.
uri_and_word_to_count = (
uri_to_words
| 'CountWord-DocPairs' >> beam.combiners.Count.PerElement())
# Re-key the above collection, a mapping from (URI, word) pairs to counts,
# into an isomorphic mapping from URI to (word, count) pairs, to prepare
# for a join by the URI key.
uri_to_word_and_count = (
uri_and_word_to_count
| 'ShiftKeys' >> beam.Map(
lambda ((uri, word), count): (uri, (word, count))))
# Perform a CoGroupByKey (a sort of pre-join) on the prepared
# uri_to_word_total and uri_to_word_and_count tagged by 'word totals' and
# 'word counts' strings. This yields a mapping from URI to a dictionary
# that maps the above-mentioned tag strings to an iterable containing the
# word total for that URI and the (word, count) pairs for it, respectively.
#
# A diagram (in which '[]' just means 'iterable'):
#
# URI: {'word totals': [count], # Total words within this URI's document.
# 'word counts': [(word, count), # Counts of specific words
# (word, count), # within this URI's document.
# ... ]}
uri_to_word_and_count_and_total = (
{'word totals': uri_to_word_total, 'word counts': uri_to_word_and_count}
| 'CoGroupByUri' >> beam.CoGroupByKey())
# Compute a mapping from each word to a (URI, term frequency) pair for each
# URI. A word's term frequency for a document is simply the number of times
# that word occurs in the document divided by the total number of words in
# the document.
def compute_term_frequency((uri, count_and_total)):
word_and_count = count_and_total['word counts']
# We have an iterable for one element that we want extracted.
[word_total] = count_and_total['word totals']
for word, count in word_and_count:
yield word, (uri, float(count) / word_total)
word_to_uri_and_tf = (
uri_to_word_and_count_and_total
| 'ComputeTermFrequencies' >> beam.FlatMap(compute_term_frequency))
# Compute a mapping from each word to its document frequency.
# A word's document frequency in a corpus is the number of
# documents in which the word appears divided by the total
# number of documents in the corpus.
#
# This calculation uses a side input, a Dataflow-computed auxiliary value
# presented to each invocation of our MapFn lambda. The second argument to
# the lambda (called total---note that we are unpacking the first argument)
# receives the value we listed after the lambda in Map(). Additional side
# inputs (and ordinary Python values, too) can be provided to MapFns and
# DoFns in this way.
word_to_df = (
word_to_doc_count
| 'ComputeDocFrequencies' >> beam.Map(
lambda (word, count), total: (word, float(count) / total),
AsSingleton(total_documents)))
# Join the term frequency and document frequency collections,
# each keyed on the word.
word_to_uri_and_tf_and_df = (
{'tf': word_to_uri_and_tf, 'df': word_to_df}
| 'CoGroupWordsByTf-df' >> beam.CoGroupByKey())
# Compute a mapping from each word to a (URI, TF-IDF) score for each URI.
# There are a variety of definitions of TF-IDF
# ("term frequency - inverse document frequency") score; here we use a
# basic version that is the term frequency divided by the log of the
# document frequency.
def compute_tf_idf((word, tf_and_df)):
[docf] = tf_and_df['df']
for uri, tf in tf_and_df['tf']:
yield word, (uri, tf * math.log(1 / docf))
word_to_uri_and_tfidf = (
word_to_uri_and_tf_and_df
| 'ComputeTf-idf' >> beam.FlatMap(compute_tf_idf))
return word_to_uri_and_tfidf
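# Illustrative sketch (not part of the example): TfIdf can also be applied to an
# in-memory PCollection of (uri, content) pairs, which is handy for quick tests;
# the file names and sentences below are made up:
#
#     with beam.Pipeline() as p:
#         output = (p
#                   | beam.Create([('a.txt', 'the cat sat'),
#                                  ('b.txt', 'the dog sat')])
#                   | TfIdf())
#         # output holds (word, (uri, tf-idf score)) pairs.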
def run(argv=None):
"""Main entry point; defines and runs the tfidf pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--uris',
required=True,
help='URIs to process.')
parser.add_argument('--output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(options=pipeline_options) as p:
# Read documents specified by the uris command line option.
pcoll = read_documents(p, glob.glob(known_args.uris))
# Compute TF-IDF information for each word.
output = pcoll | TfIdf()
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | 'write' >> WriteToText(known_args.output)
# Execute the pipeline and wait until it is completed.
if __name__ == '__main__':
run()
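# Illustrative invocation (not part of the example); the paths below are made up:
#
#     python -m apache_beam.examples.complete.tfidf \
#         --uris '/tmp/corpus/*.txt' \
#         --output /tmp/tfidf_results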
|
diego-d5000/MisValesMd | refs/heads/master | env/lib/python2.7/site-packages/django/conf/locale/az/formats.py | 3 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
pelle/talk.org | refs/heads/master | django/contrib/sessions/backends/cache.py | 3 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core.cache import cache
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = cache
super(SessionStore, self).__init__(session_key)
def load(self):
session_data = self._cache.get(self.session_key)
return session_data or {}
def save(self):
self._cache.set(self.session_key, self._session, settings.SESSION_COOKIE_AGE)
def exists(self, session_key):
if self._cache.get(session_key):
return True
return False
def delete(self, session_key):
self._cache.delete(session_key) |
adelez/grpc | refs/heads/master | tools/debug/core/chttp2_ref_leak.py | 5 | #!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reads stdin to find chttp2_refcount log lines, and prints reference leaks
# to stdout
import collections
import sys
import re
def new_obj():
return ['destroy']
outstanding = collections.defaultdict(new_obj)
# Sample log line:
# chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]
for line in sys.stdin:
m = re.search(r'chttp2:( ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
if m:
if m.group(1) == ' ref':
outstanding[m.group(2)].append(m.group(3))
else:
outstanding[m.group(2)].remove(m.group(3))
for obj, remaining in outstanding.items():
if remaining:
print 'LEAKED: %s %r' % (obj, remaining)
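# Illustrative usage (not part of the script), with a made-up log file and
# made-up address/tags in the sample output:
#
#     cat transport_debug.log | python tools/debug/core/chttp2_ref_leak.py
#     LEAKED: 629000005200 ['destroy', 'stream']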
|
muricoca/crab | refs/heads/master | scikits/crab/similarities/tests/test_basic_similarities.py | 10 | import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises, assert_equals
from ..basic_similarities import UserSimilarity, ItemSimilarity, find_common_elements
from ...metrics.pairwise import cosine_distances, \
pearson_correlation, euclidean_distances, manhattan_distances, jaccard_coefficient, \
sorensen_coefficient, loglikehood_coefficient
from ...models.classes import MatrixPreferenceDataModel, \
MatrixBooleanPrefDataModel
#Simple Movies DataSet
movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Luciana Nunes': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Penny Frewman': {'Snakes on a Plane': 4.5, 'You, Me and Dupree': 1.0, 'Superman Returns': 4.0},
'Maria Gabriela': {}}
def test_find_common_elements():
#MatrixModel
model_matrix = MatrixPreferenceDataModel(movies)
source_preferences = model_matrix.preferences_from_user('Marcel Caraciolo')
target_preferences = model_matrix.preferences_from_user('Leopoldo Pires')
assert_array_equal(np.array([[2.5, 3.5, 3.5, 3.0]]), find_common_elements(source_preferences, target_preferences)[0])
assert_array_equal(np.array([[2.5, 3.0, 3.5, 4.0]]), find_common_elements(source_preferences, target_preferences)[1])
#MatrixModel
source_preferences = model_matrix.preferences_from_user('Marcel Caraciolo')
target_preferences = model_matrix.preferences_from_user('Luciana Nunes')
assert_array_equal(np.array([[3., 2.5, 3.5, 3.5, 3., 2.5]]), find_common_elements(source_preferences, target_preferences)[0])
assert_array_equal(np.array([[1.5, 3., 3.5, 5., 3., 3.5]]), find_common_elements(source_preferences, target_preferences)[1])
#MatrixModel
source_preferences = model_matrix.preferences_from_user('Marcel Caraciolo')
target_preferences = model_matrix.preferences_from_user('Maria Gabriela')
assert_array_equal(np.array([[]]), find_common_elements(source_preferences, target_preferences)[0])
assert_array_equal(np.array([[]]), find_common_elements(source_preferences, target_preferences)[1])
#MatrixModel
source_preferences = model_matrix.preferences_for_item('Snakes on a Plane')
target_preferences = model_matrix.preferences_for_item('Superman Returns')
assert_array_equal(np.array([[3., 3.5, 3.5, 3.5, 4.5, 4., 4.]]), find_common_elements(source_preferences, target_preferences)[0])
assert_array_equal(np.array([[3.5, 4., 5., 3.5, 4., 5., 3.]]), find_common_elements(source_preferences, target_preferences)[1])
model_matrix.set_preference('Maria Gabriela', 'Back to the Future', 3.5)
source_preferences = model_matrix.preferences_for_item('Back to the Future')
target_preferences = model_matrix.preferences_for_item('Superman Returns')
assert_array_equal(np.array([[]]), find_common_elements(source_preferences, target_preferences)[0])
assert_array_equal(np.array([[]]), find_common_elements(source_preferences, target_preferences)[1])
def test_get__item___UserSimilarity():
#Cosine #With limits
#MatrixModel
model = MatrixPreferenceDataModel(movies)
similarity = UserSimilarity(model, cosine_distances, 3)
assert_array_equal(np.array([[1.]]), similarity['Marcel Caraciolo'][0][1])
assert_equals('Marcel Caraciolo', similarity['Marcel Caraciolo'][0][0])
assert_array_almost_equal(np.array([[0.99127583]]), similarity['Marcel Caraciolo'][1][1])
assert_equals('Sheldom', similarity['Marcel Caraciolo'][1][0])
assert_array_almost_equal(np.array([[0.98658676]]), similarity['Marcel Caraciolo'][2][1])
assert_equals('Lorena Abreu', similarity['Marcel Caraciolo'][2][0])
#Pearson Without limits
similarity = UserSimilarity(model, pearson_correlation)
assert_array_almost_equal(np.array([[1.]]), similarity['Leopoldo Pires'][0][1])
assert_equals('Leopoldo Pires', similarity['Leopoldo Pires'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Leopoldo Pires'][1][1])
assert_equals('Lorena Abreu', similarity['Leopoldo Pires'][1][0])
assert_array_almost_equal(np.array([[0.40451992]]), similarity['Leopoldo Pires'][2][1])
assert_equals('Marcel Caraciolo', similarity['Leopoldo Pires'][2][0])
assert_array_almost_equal(np.array([[0.2045983]]), similarity['Leopoldo Pires'][3][1])
assert_equals('Luciana Nunes', similarity['Leopoldo Pires'][3][0])
assert_array_almost_equal(np.array([[0.13483997]]), similarity['Leopoldo Pires'][4][1])
assert_equals('Sheldom', similarity['Leopoldo Pires'][4][0])
assert_array_almost_equal(np.array([[-0.25819889]]), similarity['Leopoldo Pires'][5][1])
assert_equals('Steve Gates', similarity['Leopoldo Pires'][5][0])
assert_array_almost_equal(np.array([[-1.]]), similarity['Leopoldo Pires'][6][1])
assert_equals('Penny Frewman', similarity['Leopoldo Pires'][6][0])
assert_array_almost_equal(np.array([[np.nan]]), similarity['Leopoldo Pires'][7][1])
assert_equals('Maria Gabriela', similarity['Leopoldo Pires'][7][0])
#Euclidean Without limits
similarity = UserSimilarity(model, euclidean_distances)
assert_array_equal(np.array([[1.]]), similarity['Steve Gates'][0][1])
assert_equals('Steve Gates', similarity['Steve Gates'][0][0])
assert_array_almost_equal(np.array([[0.41421356]]), similarity['Steve Gates'][1][1])
assert_equals('Marcel Caraciolo', similarity['Steve Gates'][1][0])
assert_array_almost_equal(np.array([[0.4]]), similarity['Steve Gates'][2][1])
assert_equals('Penny Frewman', similarity['Steve Gates'][2][0])
assert_array_almost_equal(np.array([[0.38742589]]), similarity['Steve Gates'][3][1])
assert_equals('Leopoldo Pires', similarity['Steve Gates'][3][0])
assert_array_almost_equal(np.array([[0.31451986]]), similarity['Steve Gates'][4][1])
assert_equals('Lorena Abreu', similarity['Steve Gates'][4][0])
assert_array_almost_equal(np.array([[0.28571429]]), similarity['Steve Gates'][5][1])
assert_equals('Sheldom', similarity['Steve Gates'][5][0])
assert_array_almost_equal(np.array([[0.2779263]]), similarity['Steve Gates'][6][1])
assert_equals('Luciana Nunes', similarity['Steve Gates'][6][0])
assert_array_almost_equal(np.array([[np.nan]]), similarity['Steve Gates'][7][1])
assert_equals('Maria Gabriela', similarity['Steve Gates'][7][0])
#Manhattan Without limits
similarity = UserSimilarity(model, manhattan_distances, 0)
assert_equals([], similarity['Steve Gates'])
similarity = UserSimilarity(model, manhattan_distances, 20)
assert_array_equal(np.array([[1.]]), similarity['Steve Gates'][0][1])
assert_equals('Steve Gates', similarity['Steve Gates'][0][0])
assert_array_almost_equal(np.array([[0.5]]), similarity['Steve Gates'][1][1])
assert_equals('Marcel Caraciolo', similarity['Steve Gates'][1][0])
assert_array_almost_equal(np.array([[0.3]]), similarity['Steve Gates'][2][1])
assert_equals('Sheldom', similarity['Steve Gates'][2][0])
assert_array_almost_equal(np.array([[0.25]]), similarity['Steve Gates'][3][1])
assert_equals('Leopoldo Pires', similarity['Steve Gates'][3][0])
assert_array_almost_equal(np.array([[0.25]]), similarity['Steve Gates'][4][1])
assert_equals('Luciana Nunes', similarity['Steve Gates'][4][0])
assert_array_almost_equal(np.array([[0.16666667]]), similarity['Steve Gates'][5][1])
assert_equals('Penny Frewman', similarity['Steve Gates'][5][0])
assert_array_almost_equal(np.array([[0.1]]), similarity['Steve Gates'][6][1])
assert_equals('Lorena Abreu', similarity['Steve Gates'][6][0])
assert_array_almost_equal(np.array([[np.nan]]), similarity['Steve Gates'][7][1])
assert_equals('Maria Gabriela', similarity['Steve Gates'][7][0])
#MatrixBooleanModel
model = MatrixBooleanPrefDataModel(movies)
similarity = UserSimilarity(model, jaccard_coefficient, 3)
assert_array_equal(np.array([[1.]]), similarity['Marcel Caraciolo'][0][1])
assert_equals('Luciana Nunes', similarity['Marcel Caraciolo'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Marcel Caraciolo'][1][1])
assert_equals('Marcel Caraciolo', similarity['Marcel Caraciolo'][1][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Marcel Caraciolo'][2][1])
assert_equals('Steve Gates', similarity['Marcel Caraciolo'][2][0])
#sorensen Without limits
similarity = UserSimilarity(model, sorensen_coefficient)
assert_array_almost_equal(np.array([[1.]]), similarity['Leopoldo Pires'][0][1])
assert_equals('Leopoldo Pires', similarity['Leopoldo Pires'][0][0])
assert_array_almost_equal(np.array([[0.88888889]]), similarity['Leopoldo Pires'][1][1])
assert_equals('Sheldom', similarity['Leopoldo Pires'][1][0])
assert_array_almost_equal(np.array([[0.8]]), similarity['Leopoldo Pires'][2][1])
assert_equals('Luciana Nunes', similarity['Leopoldo Pires'][2][0])
assert_array_almost_equal(np.array([[0.8]]), similarity['Leopoldo Pires'][3][1])
assert_equals('Marcel Caraciolo', similarity['Leopoldo Pires'][3][0])
assert_array_almost_equal(np.array([[0.8]]), similarity['Leopoldo Pires'][4][1])
assert_equals('Steve Gates', similarity['Leopoldo Pires'][4][0])
assert_array_almost_equal(np.array([[0.66666667]]), similarity['Leopoldo Pires'][5][1])
assert_equals('Lorena Abreu', similarity['Leopoldo Pires'][5][0])
assert_array_almost_equal(np.array([[0.57142857]]), similarity['Leopoldo Pires'][6][1])
assert_equals('Penny Frewman', similarity['Leopoldo Pires'][6][0])
assert_array_almost_equal(np.array([[0.]]), similarity['Leopoldo Pires'][7][1])
assert_equals('Maria Gabriela', similarity['Leopoldo Pires'][7][0])
#loglikehood with limits
similarity = UserSimilarity(model, loglikehood_coefficient, 0)
assert_equals([], similarity['Steve Gates'])
similarity = UserSimilarity(model, loglikehood_coefficient, 20)
assert_array_equal(np.array([[1.]]), similarity['Steve Gates'][0][1])
assert_equals('Luciana Nunes', similarity['Steve Gates'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Steve Gates'][1][1])
assert_equals('Marcel Caraciolo', similarity['Steve Gates'][1][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Steve Gates'][2][1])
assert_equals('Steve Gates', similarity['Steve Gates'][2][0])
assert_array_almost_equal(np.array([[0.74804989]]), similarity['Steve Gates'][3][1])
assert_equals('Lorena Abreu', similarity['Steve Gates'][3][0])
assert_array_almost_equal(np.array([[0.74804989]]), similarity['Steve Gates'][4][1])
assert_equals('Sheldom', similarity['Steve Gates'][4][0])
assert_array_almost_equal(np.array([[0.65783229]]), similarity['Steve Gates'][5][1])
assert_equals('Leopoldo Pires', similarity['Steve Gates'][5][0])
assert_array_almost_equal(np.array([[0.55415805]]), similarity['Steve Gates'][6][1])
assert_equals('Penny Frewman', similarity['Steve Gates'][6][0])
assert_array_almost_equal(np.array([[0.0]]), similarity['Steve Gates'][7][1])
assert_equals('Maria Gabriela', similarity['Steve Gates'][7][0])
def test_get_similarities__UserSimilarity():
#MatrixModel
model = MatrixPreferenceDataModel(movies)
similarity = UserSimilarity(model, cosine_distances, 3)
sim = similarity.get_similarities('Marcel Caraciolo')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, pearson_correlation)
sim = similarity.get_similarities('Leopoldo Pires')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, euclidean_distances)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, manhattan_distances, 0)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, manhattan_distances, 20)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
#MatrixBooleanPrefDataModel
model = MatrixBooleanPrefDataModel(movies)
similarity = UserSimilarity(model, sorensen_coefficient, 3)
sim = similarity.get_similarities('Marcel Caraciolo')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, loglikehood_coefficient)
sim = similarity.get_similarities('Leopoldo Pires')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, jaccard_coefficient)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, loglikehood_coefficient, 0)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
similarity = UserSimilarity(model, sorensen_coefficient, 20)
sim = similarity.get_similarities('Steve Gates')
assert_equals(len(sim), model.users_count())
def test__iter__UserSimilarity():
#MatrixModel
model = MatrixPreferenceDataModel(movies)
similarity = UserSimilarity(model, cosine_distances, 3)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), 3)
similarity = UserSimilarity(model, pearson_correlation)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), model.users_count())
similarity = UserSimilarity(model, manhattan_distances, 0)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), 0)
similarity = UserSimilarity(model, manhattan_distances, 20)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), model.users_count())
#MatrixBooleanPrefDataModel
model = MatrixBooleanPrefDataModel(movies)
similarity = UserSimilarity(model, jaccard_coefficient, 3)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), 3)
similarity = UserSimilarity(model, loglikehood_coefficient)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), model.users_count())
similarity = UserSimilarity(model, sorensen_coefficient, 0)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), 0)
similarity = UserSimilarity(model, loglikehood_coefficient, 20)
source_ids = []
prefs = []
for source_id, preferences in similarity:
source_ids.append(source_id)
prefs.append(preferences)
assert_equals(len(source_ids), model.users_count())
for pref in prefs:
assert_equals(len(pref), model.users_count())
def test_get__item___ItemSimilarity():
#MATRIXMODEL
#Cosine #With limits
model = MatrixPreferenceDataModel(movies)
similarity = ItemSimilarity(model, cosine_distances, 3)
assert_array_equal(np.array([[1.]]), similarity['Snakes on a Plane'][0][1])
assert_equals('Snakes on a Plane', similarity['Snakes on a Plane'][0][0])
assert_array_almost_equal(np.array([[0.99773877]]), similarity['Snakes on a Plane'][1][1])
assert_equals('Lady in the Water', similarity['Snakes on a Plane'][1][0])
assert_array_almost_equal(np.array([[0.9798780]]), similarity['Snakes on a Plane'][2][1])
assert_equals('Superman Returns', similarity['Snakes on a Plane'][2][0])
#Pearson Without limits
similarity = ItemSimilarity(model, pearson_correlation)
assert_array_equal(np.array([[1.]]), similarity['The Night Listener'][0][1])
assert_equals('The Night Listener', similarity['The Night Listener'][0][0])
assert_array_almost_equal(np.array([[0.55555556]]), similarity['The Night Listener'][1][1])
assert_equals('Just My Luck', similarity['The Night Listener'][1][0])
assert_array_almost_equal(np.array([[-0.17984719]]), similarity['The Night Listener'][2][1])
assert_equals('Superman Returns', similarity['The Night Listener'][2][0])
assert_array_almost_equal(np.array([[-0.25]]), similarity['The Night Listener'][3][1])
assert_equals('You, Me and Dupree', similarity['The Night Listener'][3][0])
assert_array_almost_equal(np.array([[-0.56635211]]), similarity['The Night Listener'][4][1])
assert_equals('Snakes on a Plane', similarity['The Night Listener'][4][0])
assert_array_almost_equal(np.array([[-0.61237244]]), similarity['The Night Listener'][5][1])
assert_equals('Lady in the Water', similarity['The Night Listener'][5][0])
assert_array_almost_equal(np.array([[np.nan]]), similarity['The Night Listener'][6][1])
assert_equals('Back to the Future', similarity['The Night Listener'][6][0])
similarity = ItemSimilarity(model, euclidean_distances)
assert_array_equal(np.array([[1.]]), similarity['The Night Listener'][0][1])
assert_equals('The Night Listener', similarity['The Night Listener'][0][0])
assert_array_almost_equal(np.array([[0.38742589]]), similarity['The Night Listener'][1][1])
assert_equals('Lady in the Water', similarity['The Night Listener'][1][0])
assert_array_almost_equal(np.array([[0.32037724]]), similarity['The Night Listener'][2][1])
assert_equals('Snakes on a Plane', similarity['The Night Listener'][2][0])
assert_array_almost_equal(np.array([[0.29893508]]), similarity['The Night Listener'][3][1])
assert_equals('Just My Luck', similarity['The Night Listener'][3][0])
assert_array_almost_equal(np.array([[0.29429806]]), similarity['The Night Listener'][4][1])
assert_equals('You, Me and Dupree', similarity['The Night Listener'][4][0])
assert_array_almost_equal(np.array([[0.25265031]]), similarity['The Night Listener'][5][1])
assert_equals('Superman Returns', similarity['The Night Listener'][5][0])
assert_array_almost_equal(np.array([[np.nan]]), similarity['The Night Listener'][6][1])
assert_equals('Back to the Future', similarity['The Night Listener'][6][0])
similarity = ItemSimilarity(model, manhattan_distances, 0)
assert_equals([], similarity['Lady in the Water'])
similarity = ItemSimilarity(model, manhattan_distances, 20)
assert_array_almost_equal(np.array([[1.]]), similarity['Snakes on a Plane'][0][1])
assert_equals('Snakes on a Plane', similarity['Snakes on a Plane'][0][0])
assert_array_almost_equal(np.array([[0.28571429]]), similarity['Snakes on a Plane'][1][1])
assert_equals('Superman Returns', similarity['Snakes on a Plane'][1][0])
assert_array_almost_equal(np.array([[0.2]]), similarity['Snakes on a Plane'][2][1])
assert_equals('Lady in the Water', similarity['Snakes on a Plane'][2][0])
assert_array_almost_equal(np.array([[0.16666667]]), similarity['Snakes on a Plane'][3][1])
assert_equals('The Night Listener', similarity['Snakes on a Plane'][3][0])
assert_array_almost_equal(np.array([[-0.25]]), similarity['Snakes on a Plane'][4][1])
assert_equals('Just My Luck', similarity['Snakes on a Plane'][4][0])
assert_array_almost_equal(np.array([[-0.33333333]]), similarity['Snakes on a Plane'][5][1])
assert_equals('You, Me and Dupree', similarity['Snakes on a Plane'][5][0])
#MatrixBooleanPrefDataModel
#Jaccard #With limits
model = MatrixBooleanPrefDataModel(movies)
similarity = ItemSimilarity(model, jaccard_coefficient, 3)
assert_array_equal(np.array([[1.]]), similarity['Snakes on a Plane'][0][1])
assert_equals('Snakes on a Plane', similarity['Snakes on a Plane'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Snakes on a Plane'][1][1])
assert_equals('Superman Returns', similarity['Snakes on a Plane'][1][0])
assert_array_almost_equal(np.array([[0.85714286]]), similarity['Snakes on a Plane'][2][1])
assert_equals('The Night Listener', similarity['Snakes on a Plane'][2][0])
#Sorensen Without limits
similarity = ItemSimilarity(model, sorensen_coefficient)
assert_array_equal(np.array([[1.]]), similarity['The Night Listener'][0][1])
assert_equals('The Night Listener', similarity['The Night Listener'][0][0])
assert_array_almost_equal(np.array([[0.92307692]]), similarity['The Night Listener'][1][1])
assert_equals('Snakes on a Plane', similarity['The Night Listener'][1][0])
assert_array_almost_equal(np.array([[0.92307692]]), similarity['The Night Listener'][2][1])
assert_equals('Superman Returns', similarity['The Night Listener'][2][0])
assert_array_almost_equal(np.array([[0.90909091]]), similarity['The Night Listener'][3][1])
assert_equals('Lady in the Water', similarity['The Night Listener'][3][0])
assert_array_almost_equal(np.array([[0.83333333]]), similarity['The Night Listener'][4][1])
assert_equals('You, Me and Dupree', similarity['The Night Listener'][4][0])
assert_array_almost_equal(np.array([[0.8]]), similarity['The Night Listener'][5][1])
assert_equals('Just My Luck', similarity['The Night Listener'][5][0])
assert_array_almost_equal(np.array([[0.]]), similarity['The Night Listener'][6][1])
assert_equals('Back to the Future', similarity['The Night Listener'][6][0])
similarity = ItemSimilarity(model, loglikehood_coefficient)
assert_array_equal(np.array([[1.]]), similarity['The Night Listener'][0][1])
assert_equals('Snakes on a Plane', similarity['The Night Listener'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['The Night Listener'][1][1])
assert_equals('Superman Returns', similarity['The Night Listener'][1][0])
assert_array_almost_equal(np.array([[1.]]), similarity['The Night Listener'][2][1])
assert_equals('The Night Listener', similarity['The Night Listener'][2][0])
assert_array_almost_equal(np.array([[0.74804989]]), similarity['The Night Listener'][3][1])
assert_equals('Lady in the Water', similarity['The Night Listener'][3][0])
assert_array_almost_equal(np.array([[0.65783229]]), similarity['The Night Listener'][4][1])
assert_equals('Just My Luck', similarity['The Night Listener'][4][0])
assert_array_almost_equal(np.array([[0.25087682]]), similarity['The Night Listener'][5][1])
assert_equals('You, Me and Dupree', similarity['The Night Listener'][5][0])
assert_array_almost_equal(np.array([[0.]]), similarity['The Night Listener'][6][1])
assert_equals('Back to the Future', similarity['The Night Listener'][6][0])
similarity = ItemSimilarity(model, jaccard_coefficient, 0)
assert_equals([], similarity['Lady in the Water'])
similarity = ItemSimilarity(model, sorensen_coefficient, 20)
assert_array_almost_equal(np.array([[1.]]), similarity['Snakes on a Plane'][0][1])
assert_equals('Snakes on a Plane', similarity['Snakes on a Plane'][0][0])
assert_array_almost_equal(np.array([[1.]]), similarity['Snakes on a Plane'][1][1])
assert_equals('Superman Returns', similarity['Snakes on a Plane'][1][0])
assert_array_almost_equal(np.array([[0.92307692]]), similarity['Snakes on a Plane'][2][1])
assert_equals('The Night Listener', similarity['Snakes on a Plane'][2][0])
assert_array_almost_equal(np.array([[0.92307692]]), similarity['Snakes on a Plane'][3][1])
assert_equals('You, Me and Dupree', similarity['Snakes on a Plane'][3][0])
assert_array_almost_equal(np.array([[0.8333333333]]), similarity['Snakes on a Plane'][4][1])
assert_equals('Lady in the Water', similarity['Snakes on a Plane'][4][0])
assert_array_almost_equal(np.array([[0.72727272]]), similarity['Snakes on a Plane'][5][1])
assert_equals('Just My Luck', similarity['Snakes on a Plane'][5][0])
assert_array_almost_equal(np.array([[0.]]), similarity['Snakes on a Plane'][6][1])
assert_equals('Back to the Future', similarity['Snakes on a Plane'][6][0])
def test_get_similarities__ItemSimilarity():
#MatrixModel
model = MatrixPreferenceDataModel(movies)
similarity = ItemSimilarity(model, cosine_distances, 3)
sim = similarity.get_similarities('Snakes on a Plane')
assert_equals(len(sim), model.items_count())
#Pearson Without limits
similarity = ItemSimilarity(model, pearson_correlation)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, euclidean_distances)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, manhattan_distances, 0)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, manhattan_distances, 20)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
#MatrixBooleanPrefDataModel
model = MatrixBooleanPrefDataModel(movies)
similarity = ItemSimilarity(model, jaccard_coefficient, 3)
sim = similarity.get_similarities('Snakes on a Plane')
assert_equals(len(sim), model.items_count())
#Sorensen Without limits
similarity = ItemSimilarity(model, sorensen_coefficient)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, loglikehood_coefficient)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, loglikehood_coefficient, 0)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
similarity = ItemSimilarity(model, sorensen_coefficient, 20)
sim = similarity.get_similarities('Lady in the Water')
assert_equals(len(sim), model.items_count())
def test__iter__ItemSimilarity():
#MATRIXMODEL
model = MatrixPreferenceDataModel(movies)
similarity = ItemSimilarity(model, cosine_distances, 3)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), 3)
similarity = ItemSimilarity(model, pearson_correlation)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), model.items_count())
similarity = ItemSimilarity(model, manhattan_distances, 0)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), 0)
similarity = ItemSimilarity(model, manhattan_distances, 20)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), model.items_count())
#MatrixBooleanPrefDataModel
model = MatrixBooleanPrefDataModel(movies)
similarity = ItemSimilarity(model, sorensen_coefficient, 3)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), 3)
similarity = ItemSimilarity(model, jaccard_coefficient)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), model.items_count())
similarity = ItemSimilarity(model, loglikehood_coefficient, 0)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), 0)
similarity = ItemSimilarity(model, sorensen_coefficient, 20)
item_ids = []
prefs = []
for item_id, preferences in similarity:
item_ids.append(item_id)
prefs.append(preferences)
assert_equals(len(item_ids), model.items_count())
for pref in prefs:
assert_equals(len(pref), model.items_count())
|
kenshay/ImageScript | refs/heads/master | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/raw/GL/ARB/texture_query_lod.py | 9 | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_texture_query_lod'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_texture_query_lod',error_checker=_errors._error_checker)
|
pranavtendolkr/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/hypervisors/tabs.py | 59 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.hypervisors.compute \
import tabs as cmp_tabs
from openstack_dashboard.dashboards.admin.hypervisors import tables
class HypervisorTab(tabs.TableTab):
table_classes = (tables.AdminHypervisorsTable,)
name = _("Hypervisor")
slug = "hypervisor"
template_name = "horizon/common/_detail_table.html"
def get_hypervisors_data(self):
hypervisors = []
try:
hypervisors = nova.hypervisor_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor information.'))
return hypervisors
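# Note (an assumption about Horizon's TableTab convention, added for readers of
# this snippet): TableTab looks up a method named get_<table name>_data for
# each entry in table_classes, so get_hypervisors_data() above is what
# populates AdminHypervisorsTable when this tab is rendered.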
class HypervisorHostTabs(tabs.TabGroup):
slug = "hypervisor_info"
tabs = (HypervisorTab, cmp_tabs.ComputeHostTab)
sticky = True
|
leonardowolf/bookfree | refs/heads/master | flask/lib/python3.5/site-packages/sqlalchemy/event/registry.py | 55 | # event/registry.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides managed registration services on behalf of :func:`.listen`
arguments.
By "managed registration", we mean that event listening functions and
other objects can be added to various collections in such a way that their
membership in all those collections can be revoked at once, based on
an equivalent :class:`._EventKey`.
"""
from __future__ import absolute_import
import weakref
import collections
import types
from .. import exc, util
_key_to_collection = collections.defaultdict(dict)
"""
Given an original listen() argument, can locate all
listener collections and the listener fn contained
(target, identifier, fn) -> {
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
ref(listenercollection) -> ref(listener_fn)
}
"""
_collection_to_key = collections.defaultdict(dict)
"""
Given a _ListenerCollection or _ClsLevelListener, can locate
all the original listen() arguments and the listener fn contained
ref(listenercollection) -> {
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
ref(listener_fn) -> (target, identifier, fn),
}
"""
def _collection_gced(ref):
# defaultdict, so can't get a KeyError
if not _collection_to_key or ref not in _collection_to_key:
return
listener_to_key = _collection_to_key.pop(ref)
for key in listener_to_key.values():
if key in _key_to_collection:
# defaultdict, so can't get a KeyError
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(ref)
if not dispatch_reg:
_key_to_collection.pop(key)
def _stored_in_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
owner_ref = owner.ref
listen_ref = weakref.ref(event_key._listen_fn)
if owner_ref in dispatch_reg:
return False
dispatch_reg[owner_ref] = listen_ref
listener_to_key = _collection_to_key[owner_ref]
listener_to_key[listen_ref] = key
return True
def _removed_from_collection(event_key, owner):
key = event_key._key
dispatch_reg = _key_to_collection[key]
listen_ref = weakref.ref(event_key._listen_fn)
owner_ref = owner.ref
dispatch_reg.pop(owner_ref, None)
if not dispatch_reg:
del _key_to_collection[key]
if owner_ref in _collection_to_key:
listener_to_key = _collection_to_key[owner_ref]
listener_to_key.pop(listen_ref)
def _stored_in_collection_multi(newowner, oldowner, elements):
if not elements:
return
oldowner = oldowner.ref
newowner = newowner.ref
old_listener_to_key = _collection_to_key[oldowner]
new_listener_to_key = _collection_to_key[newowner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = old_listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
if newowner in dispatch_reg:
assert dispatch_reg[newowner] == listen_ref
else:
dispatch_reg[newowner] = listen_ref
new_listener_to_key[listen_ref] = key
def _clear(owner, elements):
if not elements:
return
owner = owner.ref
listener_to_key = _collection_to_key[owner]
for listen_fn in elements:
listen_ref = weakref.ref(listen_fn)
key = listener_to_key[listen_ref]
dispatch_reg = _key_to_collection[key]
dispatch_reg.pop(owner, None)
if not dispatch_reg:
del _key_to_collection[key]
class _EventKey(object):
"""Represent :func:`.listen` arguments.
"""
__slots__ = (
'target', 'identifier', 'fn', 'fn_key', 'fn_wrap', 'dispatch_target'
)
def __init__(self, target, identifier,
fn, dispatch_target, _fn_wrap=None):
self.target = target
self.identifier = identifier
self.fn = fn
if isinstance(fn, types.MethodType):
self.fn_key = id(fn.__func__), id(fn.__self__)
else:
self.fn_key = id(fn)
self.fn_wrap = _fn_wrap
self.dispatch_target = dispatch_target
@property
def _key(self):
return (id(self.target), self.identifier, self.fn_key)
def with_wrapper(self, fn_wrap):
if fn_wrap is self._listen_fn:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
self.dispatch_target,
_fn_wrap=fn_wrap
)
def with_dispatch_target(self, dispatch_target):
if dispatch_target is self.dispatch_target:
return self
else:
return _EventKey(
self.target,
self.identifier,
self.fn,
dispatch_target,
_fn_wrap=self.fn_wrap
)
def listen(self, *args, **kw):
once = kw.pop("once", False)
named = kw.pop("named", False)
target, identifier, fn = \
self.dispatch_target, self.identifier, self._listen_fn
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
if once:
self.with_wrapper(
util.only_once(self._listen_fn)).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
def remove(self):
key = self._key
if key not in _key_to_collection:
raise exc.InvalidRequestError(
"No listeners found for event %s / %r / %s " %
(self.target, self.identifier, self.fn)
)
dispatch_reg = _key_to_collection.pop(key)
for collection_ref, listener_ref in dispatch_reg.items():
collection = collection_ref()
listener_fn = listener_ref()
if collection is not None and listener_fn is not None:
collection.remove(self.with_wrapper(listener_fn))
def contains(self):
"""Return True if this event key is registered to listen.
"""
return self._key in _key_to_collection
def base_listen(self, propagate=False, insert=False,
named=False):
target, identifier, fn = \
self.dispatch_target, self.identifier, self._listen_fn
dispatch_collection = getattr(target.dispatch, identifier)
if insert:
dispatch_collection.\
for_modify(target.dispatch).insert(self, propagate)
else:
dispatch_collection.\
for_modify(target.dispatch).append(self, propagate)
@property
def _listen_fn(self):
return self.fn_wrap or self.fn
def append_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.append(self._listen_fn)
return True
else:
return False
def remove_from_list(self, owner, list_):
_removed_from_collection(self, owner)
list_.remove(self._listen_fn)
def prepend_to_list(self, owner, list_):
if _stored_in_collection(self, owner):
list_.appendleft(self._listen_fn)
return True
else:
return False
|
macobo/python-grader | refs/heads/master | grader/code_runner.py | 1 | import os
import subprocess
import datetime
import signal
import time
import sys
CURRENT_FOLDER = os.path.abspath(os.path.dirname(__file__))
SANDBOX_DIR = os.path.join(os.path.dirname(CURRENT_FOLDER), "sandbox")
TEST_RUN_CMD = [sys.executable, os.path.join(SANDBOX_DIR, "run_test")]
DOCKER_SANDBOX = [sys.executable, os.path.join(SANDBOX_DIR, 'run_tests_docker_sandbox')]
def read_proc_results(proc, decode):
stdout = proc.stdout.read()
stderr = proc.stderr.read()
if decode:
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
status = proc.returncode
return status, stdout, stderr
def microseconds_passed(time_delta):
return time_delta.microseconds + time_delta.seconds * 10**6
def call_command(cmd, timeout=float('inf'), cwd=None, decode=True, **subproc_options):
if cwd is None:
cwd = os.getcwd()
start = datetime.datetime.now()
subproc = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**subproc_options
)
reached_timeout = False
while subproc.poll() is None:
time.sleep(0.02)
now = datetime.datetime.now()
if microseconds_passed(now-start) >= timeout * 10**6:
subproc.kill()
os.kill(subproc.pid, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
reached_timeout = True
#break
status, stdout, stderr = read_proc_results(subproc, decode)
if reached_timeout:
status = 1
return status, stdout, stderr
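# Illustrative usage (an assumed example, not part of the grader itself):
#   status, out, err = call_command(["echo", "hello"], timeout=2.0)
# status is the subprocess return code on normal exit and is forced to 1 when
# the timeout is reached and the process has to be killed.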
def call_test(test_index, tester_path, solution_path, options):
# this assumes that tester, solution resides in the same path
#working_dir = os.getcwd()#os.path.dirname(tester_path)
cmd = TEST_RUN_CMD + [
tester_path,
solution_path,
str(test_index)
]
timeout = options.get('timeout', 1.0)
status, stdout, stderr = call_command(cmd, timeout)
return status == 0, stdout, stderr
def call_sandbox(sandbox_cmd, tester_path, solution_path):
cmd = sandbox_cmd + [tester_path, solution_path]
return call_command(cmd)
|
necozay/tulip-control | refs/heads/master | examples/developer/partialStateExample.py | 1 | #!/usr/bin/env python
#
# WARNING: This example may not yet be working. Please check again in
# the upcoming release.
#
"""
This example is an extension of robot_discrete.py by including continuous
dynamics with disturbances.
Petter Nilsson ([email protected])
August 14, 2011
NO, system and cont. prop definitions based on TuLiP 1.x
2 Jul, 2013
NO, TuLiP 1.x discretization
17 Jul, 2013
OM, Testing with shrunk Polytopes
4 Oct, 2014
"""
#
# Note: This code is commented to allow components to be extracted into
# the tutorial that is part of the users manual. Comments containing
# strings of the form @label@ are used for this purpose.
import logging
logging.basicConfig(level=logging.INFO)
# @import_section@
import numpy as np
from tulip import spec, synth, hybrid
from polytope import box2poly
from tulip.abstract import prop2part, discretize
from tulip.abstract.plot import plot_partition
from tulip.abstract.prop2partition import shrinkPartition, shrinkPoly
from tulip.hybrid import generateFilter
from cvxopt import matrix
# @import_section_end@
show = False
# @dynamics_section@
# Problem parameters
input_bound = 6.0
uncertainty = 0.001
epsilon = 0.02
filter_bound = 1 - uncertainty/epsilon
# Continuous state space
cont_state_space = box2poly([[0., 3.], [0., 2.]])
# Continuous dynamics
A = np.array([[0.95, 0.2], [ 0., 0.95]]) #need (A,C) observable
B = np.array([[0.2, 0.], [ 0., 0.2]])
C = np.array([[1.0, 1.0]])
E = np.array([[1.0,0.], [0.,1.0]])
# Available control, possible disturbances
U = input_bound *np.array([[-1., 1.], [-1., 1.]])
W = uncertainty *np.array([[-1., 1.], [-1., 1.]])
# Convert to polyhedral representation
U = box2poly(U)
W = box2poly(W)
# Construct the LTI system describing the dynamics
sys_dyn = hybrid.LtiOutSysDyn(A=A,B=B,C=C, E=E, K=None, Uset=U, Wset=W, domain=cont_state_space)
L = generateFilter(A, C, filter_bound, use_mosek=False)
sys_dyn_hat = sys_dyn.generateObservedDynamics(L,epsilon)
# @dynamics_section_end@
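# Reference sketch (an assumption based on TuLiP's usual LTI convention, since
# LtiOutSysDyn is not documented here): the plant is taken to evolve as
#   x[k+1] = A x[k] + B u[k] + E w[k],   y[k] = C x[k],
# with u restricted to Uset and w to Wset, and the observer gain L is chosen so
# that the estimation error stays within epsilon, which is why the proposition
# polytopes below are shrunk by epsilon.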
# @partition_section@
# Define atomic propositions for relevant regions of state space
cont_props = {}
cont_props['home'] = shrinkPoly(box2poly([[0., 1.], [0., 1.]]),epsilon)
cont_props['lot'] = shrinkPoly(box2poly([[2., 3.], [1., 2.]]),epsilon)
# Compute the proposition preserving partition of the continuous state space
cont_partition = prop2part(cont_state_space, cont_props)
plot_partition(cont_partition) if show else None
cont_partition = shrinkPartition(cont_partition, epsilon)
plot_partition(cont_partition) if show else None
# @partition_section_end@
# @discretize_section@
# Given dynamics & proposition-preserving partition, find feasible transitions
disc_dynamics = discretize(
cont_partition, sys_dyn_hat, closed_loop=True, conservative=True,
N=1, min_cell_volume=0.01, plotit=show, trans_length=3
)
# @discretize_section_end@
"""Visualize transitions in continuous domain (optional)"""
plot_partition(disc_dynamics.ppp, disc_dynamics.ts,
disc_dynamics.ppp2ts) if show else None
"""Specifications"""
# Environment variables and assumptions
env_vars = {'park'}
env_init = set() # empty set
env_prog = '!park'
env_safe = set() # empty set
# System variables and requirements
sys_vars = {'X0reach'}
sys_init = {'X0reach'}
sys_prog = {'home'} # []<>home
sys_safe = {'(X(X0reach) <-> lot) || (X0reach && !park)'}
sys_prog |= {'X0reach'}
# Create the specification
specs = spec.GRSpec(env_vars, sys_vars, env_init, sys_init,
env_safe, sys_safe, env_prog, sys_prog)
# @synthesize_section@
"""Synthesize"""
ctrl = synth.synthesize('gr1c', specs,
sys=disc_dynamics.ts, ignore_sys_init=True)
# Generate a graphical representation of the controller for viewing
#if not ctrl.save('continuous.png'):
# print(ctrl)
# @synthesize_section_end@
# Simulation
|
radarsat1/mapperRec | refs/heads/master | mapperRec.py | 1 | #!/usr/bin/env python
import ctypes, time
import _ctypes
import os
def func_address(adll, name):
if 'dlsym' in dir(_ctypes):
return _ctypes.dlsym(adll._handle, name)
else:
return _ctypes.GetProcAddress(adll._handle, name)
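# func_address resolves a symbol to its raw address in the already-loaded
# shared library (dlsym on POSIX, GetProcAddress on Windows); later in this
# module that address is written into the library's exported function-pointer
# globals, e.g. backend_start.value = func_address(rec, "text_start").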
if os.path.exists("./libmapperrec.dylib"):
rec = ctypes.cdll.LoadLibrary("./libmapperrec.dylib")
elif os.path.exists("./libmapperrec.so"):
rec = ctypes.cdll.LoadLibrary("./libmapperrec.so")
rec.oscstreamdb_defaults()
backend_strings = (ctypes.c_char_p*3).in_dll(rec, "backend_strings")
backend = ctypes.c_int.in_dll(rec, "backend")
BACKEND_FILE = 0
BACKEND_BINARY = 1
BACKEND_OSCSTREAMDB = 2
class backend_text_options_t(ctypes.Structure):
_fields_ = [("file_path", ctypes.c_char_p)]
class backend_binary_options_t(ctypes.Structure):
_fields_ = [("file_path", ctypes.c_char_p)]
backend_text_options = backend_text_options_t.in_dll(rec, "backend_text_options")
backend_binary_options = backend_binary_options_t.in_dll(rec, "backend_binary_options")
def set_output_filename(fn):
backend_text_options.file_path = fn
backend_binary_options.file_path = fn
backend_patterns = {
BACKEND_FILE: BACKEND_FILE,
BACKEND_BINARY: BACKEND_BINARY,
BACKEND_OSCSTREAMDB: BACKEND_OSCSTREAMDB,
'file': BACKEND_FILE,
'text': BACKEND_FILE,
'binary': BACKEND_BINARY,
'oscstreamdb': BACKEND_OSCSTREAMDB,
}
def set_device_name(name):
rec.recmonitor_add_device_string(name)
def set_path_name(name):
rec.recmonitor_add_signal_string(name)
def set_backend(b):
backend.value = backend_patterns[b]
def start():
if ctypes.c_int.in_dll(rec, "n_device_strings").value < 1:
raise Exception("Must set device or path name.")
backend_start = ctypes.c_void_p.in_dll(rec, "backend_start")
backend_poll = ctypes.c_void_p.in_dll(rec, "backend_poll")
backend_stop = ctypes.c_void_p.in_dll(rec, "backend_stop")
backend_write_value = ctypes.c_void_p.in_dll(rec, "backend_write_value")
if backend.value == BACKEND_FILE:
backend_start.value = func_address(rec, "text_start")
backend_stop.value = func_address(rec, "text_stop")
backend_poll.value = func_address(rec, "text_poll")
backend_write_value.value = func_address(rec, "text_write_value")
elif backend.value == BACKEND_BINARY:
backend_start.value = func_address(rec, "binary_start")
backend_stop.value = func_address(rec, "binary_stop")
backend_poll.value = func_address(rec, "binary_poll")
backend_write_value.value = func_address(rec, "binary_write_value")
elif backend.value == BACKEND_OSCSTREAMDB:
# TODO OSCStreamDB options
raise Exception("Still need to implement OSCStreamDB options.")
backend_start.value = func_address(rec, "oscstreamdb_start")
backend_stop.value = func_address(rec, "oscstreamdb_stop")
backend_poll.value = func_address(rec, "oscstreamdb_poll")
backend_write_value.value = func_address(rec, "oscstreamdb_write_value")
backend_start = ctypes.CFUNCTYPE(None).in_dll(rec, "backend_start")
backend_poll = ctypes.CFUNCTYPE(None).in_dll(rec, "backend_poll")
backend_stop = ctypes.CFUNCTYPE(None).in_dll(rec, "backend_stop")
# Instruct library to send us names via a memory FIFO
ctypes.c_int.in_dll(rec, "send_device_names").value = 1
ctypes.c_int.in_dll(rec, "send_signal_names").value = 1
if backend_start():
raise Exception("Error starting backend.")
try:
if rec.recmonitor_start():
raise Exception("Error starting monitor.")
try:
if rec.recdevice_start():
raise Exception("Error starting device.")
except:
rec.recmonitor_stop()
raise
except:
backend_stop()
raise
def stop():
rec.recdevice_stop()
rec.recmonitor_stop()
if ctypes.c_int.in_dll(rec, "backend_stop")!=0:
backend_stop = ctypes.CFUNCTYPE(None).in_dll(rec, "backend_stop")
backend_stop()
def poll():
backend_poll = ctypes.CFUNCTYPE(None).in_dll(rec, "backend_poll")
if backend_poll() or rec.command_poll():
return True
rec.recmonitor_poll()
rec.recdevice_poll()
_get_device_name = rec.get_device_name
_get_device_name.argtypes = None
_get_device_name.restype = ctypes.c_char_p
_get_signal_name = rec.get_signal_name
_get_signal_name.argtypes = None
_get_signal_name.restype = ctypes.c_char_p
def get_device_name():
s = _get_device_name()
if s!=None and s!='':
return ord(s[0]), s[1:]
return None, None
def get_signal_name():
s = _get_signal_name()
if s!=None and s!='':
return ord(s[0]), s[1:]
return None, None
if __name__=="__main__":
set_backend(BACKEND_FILE)
set_device_name("testsend")
set_output_filename("test.txt")
start()
|
adoosii/edx-platform | refs/heads/master | common/djangoapps/enrollment/tests/fake_data_api.py | 104 | """
A Fake Data API for testing purposes.
"""
import copy
import datetime
_DEFAULT_FAKE_MODE = {
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": None,
"description": None
}
_ENROLLMENTS = []
_COURSES = []
_ENROLLMENT_ATTRIBUTES = []
# pylint: disable=unused-argument
def get_course_enrollments(student_id):
"""Stubbed out Enrollment data request."""
return _ENROLLMENTS
def get_course_enrollment(student_id, course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_enrollment(student_id, course_id)
def create_course_enrollment(student_id, course_id, mode='honor', is_active=True):
"""Stubbed out Enrollment creation request. """
return add_enrollment(student_id, course_id, mode=mode, is_active=is_active)
def update_course_enrollment(student_id, course_id, mode=None, is_active=None):
"""Stubbed out Enrollment data request."""
enrollment = _get_fake_enrollment(student_id, course_id)
if enrollment and mode is not None:
enrollment['mode'] = mode
if enrollment and is_active is not None:
enrollment['is_active'] = is_active
return enrollment
def get_course_enrollment_info(course_id, include_expired=False):
"""Stubbed out Enrollment data request."""
return _get_fake_course_info(course_id)
def _get_fake_enrollment(student_id, course_id):
"""Get an enrollment from the enrollments array."""
for enrollment in _ENROLLMENTS:
if student_id == enrollment['student'] and course_id == enrollment['course']['course_id']:
return enrollment
def _get_fake_course_info(course_id):
"""Get a course from the courses array."""
for course in _COURSES:
if course_id == course['course_id']:
return course
def add_enrollment(student_id, course_id, is_active=True, mode='honor'):
"""Append an enrollment to the enrollments array."""
enrollment = {
"created": datetime.datetime.now(),
"mode": mode,
"is_active": is_active,
"course": _get_fake_course_info(course_id),
"student": student_id
}
_ENROLLMENTS.append(enrollment)
return enrollment
# pylint: disable=unused-argument
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Add or update enrollment attribute array"""
for attribute in attributes:
_ENROLLMENT_ATTRIBUTES.append({
'namespace': attribute['namespace'],
'name': attribute['name'],
'value': attribute['value']
})
# pylint: disable=unused-argument
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attribute array"""
return _ENROLLMENT_ATTRIBUTES
def add_course(course_id, enrollment_start=None, enrollment_end=None, invite_only=False, course_modes=None):
"""Append course to the courses array."""
course_info = {
"course_id": course_id,
"enrollment_end": enrollment_end,
"course_modes": [],
"enrollment_start": enrollment_start,
"invite_only": invite_only,
}
if not course_modes:
course_info['course_modes'].append(_DEFAULT_FAKE_MODE)
else:
for mode in course_modes:
new_mode = copy.deepcopy(_DEFAULT_FAKE_MODE)
new_mode['slug'] = mode
course_info['course_modes'].append(new_mode)
_COURSES.append(course_info)
def reset():
"""Set the enrollments and courses arrays to be empty."""
global _COURSES # pylint: disable=global-statement
_COURSES = []
global _ENROLLMENTS # pylint: disable=global-statement
_ENROLLMENTS = []
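# Illustrative usage in a test (an assumed example, not part of the edX suite):
#   reset()
#   add_course('edX/DemoX/Demo_Course', course_modes=['honor', 'verified'])
#   create_course_enrollment('student-1', 'edX/DemoX/Demo_Course', mode='verified')
#   enrollment = get_course_enrollment('student-1', 'edX/DemoX/Demo_Course')
#   assert enrollment['mode'] == 'verified' and enrollment['is_active']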
|
vadimtk/chrome4sdp | refs/heads/master | third_party/protobuf/descriptor_pb2.py | 193 | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='google/protobuf/descriptor.proto',
package='google.protobuf',
serialized_pb='\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdc\x02\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\"\xa9\x03\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x1a,\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"\x94\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12.\n\x07options\x18\x08 \x01(\x0b\x32\x1d.google.protobuf.FieldOptions\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"\x8c\x01\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\x7f\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\"\xa4\x03\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 
\x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12!\n\x13\x63\x63_generic_services\x18\x10 \x01(\x08:\x04true\x12#\n\x15java_generic_services\x18\x11 \x01(\x08:\x04true\x12!\n\x13py_generic_services\x18\x12 \x01(\x08:\x04true\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xb8\x01\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x94\x02\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14\x65xperimental_map_key\x18\t \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"]\n\x0b\x45numOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"b\n\x10\x45numValueOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"`\n\x0eServiceOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"_\n\rMethodOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x85\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 \x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\x42)\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01')
_FIELDDESCRIPTORPROTO_TYPE = descriptor.EnumDescriptor(
name='Type',
full_name='google.protobuf.FieldDescriptorProto.Type',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=5, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=6, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=7, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=8, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=9, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=10, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=11, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=12, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=13, number=14,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=14, number=15,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=15, number=16,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=16, number=17,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=17, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1187,
serialized_end=1497,
)
_FIELDDESCRIPTORPROTO_LABEL = descriptor.EnumDescriptor(
name='Label',
full_name='google.protobuf.FieldDescriptorProto.Label',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='LABEL_OPTIONAL', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LABEL_REQUIRED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LABEL_REPEATED', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1499,
serialized_end=1566,
)
_FILEOPTIONS_OPTIMIZEMODE = descriptor.EnumDescriptor(
name='OptimizeMode',
full_name='google.protobuf.FileOptions.OptimizeMode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='SPEED', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CODE_SIZE', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LITE_RUNTIME', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2449,
serialized_end=2507,
)
_FIELDOPTIONS_CTYPE = descriptor.EnumDescriptor(
name='CType',
full_name='google.protobuf.FieldOptions.CType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='STRING', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CORD', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='STRING_PIECE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2926,
serialized_end=2973,
)
_FILEDESCRIPTORSET = descriptor.Descriptor(
name='FileDescriptorSet',
full_name='google.protobuf.FileDescriptorSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=53,
serialized_end=124,
)
_FILEDESCRIPTORPROTO = descriptor.Descriptor(
name='FileDescriptorProto',
full_name='google.protobuf.FileDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='service', full_name='google.protobuf.FileDescriptorProto.service', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FileDescriptorProto.options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=127,
serialized_end=475,
)
_DESCRIPTORPROTO_EXTENSIONRANGE = descriptor.Descriptor(
name='ExtensionRange',
full_name='google.protobuf.DescriptorProto.ExtensionRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=859,
serialized_end=903,
)
_DESCRIPTORPROTO = descriptor.Descriptor(
name='DescriptorProto',
full_name='google.protobuf.DescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.DescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='field', full_name='google.protobuf.DescriptorProto.field', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.DescriptorProto.options', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=478,
serialized_end=903,
)
_FIELDDESCRIPTORPROTO = descriptor.Descriptor(
name='FieldDescriptorProto',
full_name='google.protobuf.FieldDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDDESCRIPTORPROTO_TYPE,
_FIELDDESCRIPTORPROTO_LABEL,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=906,
serialized_end=1566,
)
_ENUMDESCRIPTORPROTO = descriptor.Descriptor(
name='EnumDescriptorProto',
full_name='google.protobuf.EnumDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1569,
serialized_end=1709,
)
_ENUMVALUEDESCRIPTORPROTO = descriptor.Descriptor(
name='EnumValueDescriptorProto',
full_name='google.protobuf.EnumValueDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1711,
serialized_end=1819,
)
_SERVICEDESCRIPTORPROTO = descriptor.Descriptor(
name='ServiceDescriptorProto',
full_name='google.protobuf.ServiceDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1822,
serialized_end=1966,
)
_METHODDESCRIPTORPROTO = descriptor.Descriptor(
name='MethodDescriptorProto',
full_name='google.protobuf.MethodDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1968,
serialized_end=2095,
)
_FILEOPTIONS = descriptor.Descriptor(
name='FileOptions',
full_name='google.protobuf.FileOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=3,
number=9, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=4,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=5,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=6,
number=18, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=7,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FILEOPTIONS_OPTIMIZEMODE,
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2098,
serialized_end=2518,
)
_MESSAGEOPTIONS = descriptor.Descriptor(
name='MessageOptions',
full_name='google.protobuf.MessageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=2,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2521,
serialized_end=2705,
)
_FIELDOPTIONS = descriptor.Descriptor(
name='FieldOptions',
full_name='google.protobuf.FieldOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.FieldOptions.packed', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='experimental_map_key', full_name='google.protobuf.FieldOptions.experimental_map_key', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=4,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDOPTIONS_CTYPE,
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2708,
serialized_end=2984,
)
_ENUMOPTIONS = descriptor.Descriptor(
name='EnumOptions',
full_name='google.protobuf.EnumOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=0,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2986,
serialized_end=3079,
)
_ENUMVALUEOPTIONS = descriptor.Descriptor(
name='EnumValueOptions',
full_name='google.protobuf.EnumValueOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=0,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=3081,
serialized_end=3179,
)
_SERVICEOPTIONS = descriptor.Descriptor(
name='ServiceOptions',
full_name='google.protobuf.ServiceOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=0,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=3181,
serialized_end=3277,
)
_METHODOPTIONS = descriptor.Descriptor(
name='MethodOptions',
full_name='google.protobuf.MethodOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=0,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=3279,
serialized_end=3374,
)
_UNINTERPRETEDOPTION_NAMEPART = descriptor.Descriptor(
name='NamePart',
full_name='google.protobuf.UninterpretedOption.NamePart',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1,
number=2, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3587,
serialized_end=3638,
)
_UNINTERPRETEDOPTION = descriptor.Descriptor(
name='UninterpretedOption',
full_name='google.protobuf.UninterpretedOption',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.UninterpretedOption.name', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3377,
serialized_end=3638,
)
_FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS
_DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO;
_DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE
_DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS
_FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL
_FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE
_FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS
_FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO;
_FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO;
_ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS
_ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS
_SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO
_SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS
_METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS
_FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE
_FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS;
_MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE
_FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS;
_ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION;
_UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART
class FileDescriptorSet(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FILEDESCRIPTORSET
# @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet)
class FileDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FILEDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto)
class DescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class ExtensionRange(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE
# @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange)
DESCRIPTOR = _DESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto)
class FieldDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FIELDDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto)
class EnumDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENUMDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto)
class EnumValueDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto)
class ServiceDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SERVICEDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto)
class MethodDescriptorProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _METHODDESCRIPTORPROTO
# @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto)
class FileOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FILEOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.FileOptions)
class MessageOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MESSAGEOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions)
class FieldOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _FIELDOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.FieldOptions)
class EnumOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENUMOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions)
class EnumValueOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ENUMVALUEOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions)
class ServiceOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SERVICEOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions)
class MethodOptions(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _METHODOPTIONS
# @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions)
class UninterpretedOption(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
class NamePart(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART
# @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart)
DESCRIPTOR = _UNINTERPRETEDOPTION
# @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption)
# @@protoc_insertion_point(module_scope)
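# Illustrative usage sketch added for clarity (not part of the protoc
# output): the generated classes behave like ordinary protobuf messages.
if __name__ == '__main__':
    _proto = FileDescriptorProto()
    _proto.name = 'example.proto'
    _data = _proto.SerializeToString()
    _copy = FileDescriptorProto()
    _copy.ParseFromString(_data)
    assert _copy.name == 'example.proto'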
|
Gamecredits-Universe/Gamecredits-electrum-client | refs/heads/master | gui/kivy/uix/drawer.py | 33 | '''Drawer Widget to hold the main window and the menu/hidden section that
can be swiped in from the left. The menu is hidden in phone mode
and visible in tablet mode.
This class is specifically inlined to save on start-up speed (minimize i/o).
'''
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import OptionProperty, NumericProperty, ObjectProperty
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger  # used by add_widget below
import gc
# delayed imports
app = None
class Drawer(Factory.RelativeLayout):
'''Drawer Widget to hold the main window and the menu/hidden section that
    can be swiped in from the left. The menu is hidden in phone mode
    and visible in tablet mode.
'''
state = OptionProperty('closed',
options=('closed', 'open', 'opening', 'closing'))
'''This indicates the current state the drawer is in.
:attr:`state` is a `OptionProperty` defaults to `closed`. Can be one of
`closed`, `open`, `opening`, `closing`.
'''
scroll_timeout = NumericProperty(200)
'''Timeout allowed to trigger the :data:`scroll_distance`,
in milliseconds. If the user has not moved :data:`scroll_distance`
within the timeout, the scrolling will be disabled and the touch event
will go to the children.
:data:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty`
and defaults to 200 (milliseconds)
'''
scroll_distance = NumericProperty('9dp')
'''Distance to move before scrolling the :class:`Drawer` in pixels.
As soon as the distance has been traveled, the :class:`Drawer` will
start to scroll, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target
device's screen.
:data:`scroll_distance` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 9dp.
'''
drag_area = NumericProperty('9dp')
    '''The width of the area along the left edge that triggers the opening of
    the drawer.
    :attr:`drag_area` is a `NumericProperty` and defaults to 9dp.
'''
hidden_widget = ObjectProperty(None)
''' This is the widget that is hidden in phone mode on the left side of
drawer or displayed on the left of the overlay widget in tablet mode.
:attr:`hidden_widget` is a `ObjectProperty` defaults to None.
'''
overlay_widget = ObjectProperty(None)
'''This a pointer to the default widget that is overlayed either on top or
to the right of the hidden widget.
'''
def __init__(self, **kwargs):
super(Drawer, self).__init__(**kwargs)
        self._trigger_gc = Clock.create_trigger(self._re_enable_gc, .2)
def toggle_drawer(self):
if app.ui_mode[0] == 't':
return
Factory.Animation.cancel_all(self.overlay_widget)
anim = Factory.Animation(x=self.hidden_widget.width
if self.state in ('opening', 'closed') else 0,
d=.1, t='linear')
anim.bind(on_complete = self._complete_drawer_animation)
anim.start(self.overlay_widget)
def _re_enable_gc(self, dt):
global gc
gc.enable()
def on_touch_down(self, touch):
if self.disabled:
return
if not self.collide_point(*touch.pos):
return
touch.grab(self)
# disable gc for smooth interaction
# This is still not enough while wallet is synchronising
# look into pausing all background tasks while ui interaction like this
gc.disable()
global app
if not app:
app = App.get_running_app()
# skip on tablet mode
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_down(touch)
state = self.state
touch.ud['send_touch_down'] = False
start = 0 #if state[0] == 'c' else self.hidden_widget.right
drag_area = self.drag_area\
if self.state[0] == 'c' else\
(self.overlay_widget.x)
if touch.x < start or touch.x > drag_area:
if self.state == 'open':
self.toggle_drawer()
return
return super(Drawer, self).on_touch_down(touch)
self._touch = touch
Clock.schedule_once(self._change_touch_mode,
self.scroll_timeout/1000.)
touch.ud['in_drag_area'] = True
touch.ud['send_touch_down'] = True
return
def on_touch_move(self, touch):
if not touch.grab_current is self:
return
self._touch = False
# skip on tablet mode
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_move(touch)
if not touch.ud.get('in_drag_area', None):
return super(Drawer, self).on_touch_move(touch)
ov = self.overlay_widget
ov.x=min(self.hidden_widget.width,
max(ov.x + touch.dx*2, 0))
#_anim = Animation(x=x, duration=1/2, t='in_out_quart')
#_anim.cancel_all(ov)
#_anim.start(ov)
if abs(touch.x - touch.ox) < self.scroll_distance:
return
touch.ud['send_touch_down'] = False
Clock.unschedule(self._change_touch_mode)
self._touch = None
self.state = 'opening' if touch.dx > 0 else 'closing'
touch.ox = touch.x
return
def _change_touch_mode(self, *args):
if not self._touch:
return
touch = self._touch
touch.ungrab(self)
touch.ud['in_drag_area'] = False
touch.ud['send_touch_down'] = False
self._touch = None
super(Drawer, self).on_touch_down(touch)
return
def on_touch_up(self, touch):
if not touch.grab_current is self:
return
        self._trigger_gc()
touch.ungrab(self)
touch.grab_current = None
# skip on tablet mode
get = touch.ud.get
if app.ui_mode[0] == 't':
return super(Drawer, self).on_touch_up(touch)
self.old_x = [1, ] * 10
self.speed = sum((
(self.old_x[x + 1] - self.old_x[x]) for x in range(9))) / 9.
if get('send_touch_down', None):
# touch up called before moving
Clock.unschedule(self._change_touch_mode)
self._touch = None
Clock.schedule_once(
lambda dt: super(Drawer, self).on_touch_down(touch))
if get('in_drag_area', None):
if abs(touch.x - touch.ox) < self.scroll_distance:
anim_to = (0 if self.state[0] == 'c'
else self.hidden_widget.width)
Factory.Animation(x=anim_to, d=.1).start(self.overlay_widget)
return
touch.ud['in_drag_area'] = False
if not get('send_touch_down', None):
self.toggle_drawer()
Clock.schedule_once(lambda dt: super(Drawer, self).on_touch_up(touch))
def _complete_drawer_animation(self, *args):
self.state = 'open' if self.state in ('opening', 'closed') else 'closed'
def add_widget(self, widget, index=1):
if not widget:
return
iget = self.ids.get
if not iget('hidden_widget') or not iget('overlay_widget'):
super(Drawer, self).add_widget(widget)
return
if not self.hidden_widget:
self.hidden_widget = self.ids.hidden_widget
if not self.overlay_widget:
self.overlay_widget = self.ids.overlay_widget
if self.overlay_widget.children and self.hidden_widget.children:
Logger.debug('Drawer: Accepts only two widgets. discarding rest')
return
if not self.hidden_widget.children:
self.hidden_widget.add_widget(widget)
else:
self.overlay_widget.add_widget(widget)
widget.x = 0
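    # Illustrative kv sketch (an assumption, not the original drawer.kv):
    # add_widget() above relies on the rule for Drawer exposing two
    # container ids, along the lines of
    #
    #   <Drawer>:
    #       FloatLayout:
    #           id: hidden_widget
    #       FloatLayout:
    #           id: overlay_widget
    #
    # The first widget added from Python then becomes the hidden (menu)
    # content and the second becomes the overlay (main window) content.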
def remove_widget(self, widget):
if self.overlay_widget.children[0] == widget:
self.overlay_widget.clear_widgets()
return
        if widget in self.hidden_widget.children:
self.hidden_widget.clear_widgets()
return
def clear_widgets(self):
self.overlay_widget.clear_widgets()
self.hidden_widget.clear_widgets()
if __name__ == '__main__':
from kivy.app import runTouchApp
from kivy.lang import Builder
runTouchApp(Builder.load_string('''
Drawer:
Button:
    Button:
''')) |
oneconvergence/group-based-policy | refs/heads/oneconvergence_service_node_driver | gbpservice/neutron/services/grouppolicy/common/constants.py | 2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
GP_ACTION_ALLOW = 'allow'
GP_ACTION_REDIRECT = 'redirect'
GP_DIRECTION_IN = 'in'
GP_DIRECTION_OUT = 'out'
GP_DIRECTION_BI = 'bi'
GP_NETWORK_SVC_PARAM_TYPE = 'type'
GP_NETWORK_SVC_PARAM_NAME = 'name'
GP_NETWORK_SVC_PARAM_VALUE = 'value'
GP_NETWORK_SVC_PARAM_TYPE_IP_SINGLE = 'ip_single'
GP_NETWORK_SVC_PARAM_TYPE_IP_POOL = 'ip_pool'
GP_NETWORK_SVC_PARAM_TYPE_STRING = 'string'
GP_NETWORK_SVC_PARAM_VALUE_SELF_SUBNET = 'self_subnet'
GP_NETWORK_SVC_PARAM_VALUE_NAT_POOL = 'nat_pool'
|
simonwydooghe/ansible | refs/heads/devel | test/units/modules/network/fortios/test_fortios_switch_controller_vlan.py | 21 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_switch_controller_vlan
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_vlan.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_vlan_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_vlan': {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal-message-override-group': 'test_value_7',
'radius-server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
}
set_method_mock.assert_called_with('switch-controller', 'vlan', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_vlan_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_vlan': {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal-message-override-group': 'test_value_7',
'radius-server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
}
set_method_mock.assert_called_with('switch-controller', 'vlan', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_vlan_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_vlan': {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'vlan', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_vlan_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_vlan': {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller', 'vlan', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_vlan_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_vlan': {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal-message-override-group': 'test_value_7',
'radius-server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
}
set_method_mock.assert_called_with('switch-controller', 'vlan', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_switch_controller_vlan_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_vlan': {
'random_attribute_not_valid': 'tag',
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal_message_override_group': 'test_value_7',
'radius_server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_vlan.fortios_switch_controller(input_data, fos_instance)
expected_data = {
'auth': 'radius',
'color': '4',
'comments': 'test_value_5',
'name': 'default_name_6',
'portal-message-override-group': 'test_value_7',
'radius-server': 'test_value_8',
'security': 'open',
'usergroup': 'test_value_10',
'vdom': 'test_value_11',
'vlanid': '12'
}
set_method_mock.assert_called_with('switch-controller', 'vlan', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
DirectXMan12/nova-hacking | refs/heads/feature_novnc_krb | nova/cells/rpc_driver.py | 1 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells RPC Communication Driver
"""
from oslo.config import cfg
from nova.cells import driver
from nova.openstack.common import rpc
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.openstack.common.rpc import proxy as rpc_proxy
cell_rpc_driver_opts = [
cfg.StrOpt('rpc_driver_queue_base',
default='cells.intercell',
help="Base queue name to use when communicating between "
"cells. Various topics by message type will be "
"appended to this.")]
CONF = cfg.CONF
CONF.register_opts(cell_rpc_driver_opts, group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
rpcapi_cap_opt = cfg.StrOpt('intercell',
default=None,
help='Set a version cap for messages sent between cells services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
_CELL_TO_CELL_RPC_API_VERSION = '1.0'
class CellsRPCDriver(driver.BaseCellsDriver):
"""Driver for cell<->cell communication via RPC. This is used to
setup the RPC consumers as well as to send a message to another cell.
One instance of this class will be created for every neighbor cell
that we find in the DB and it will be associated with the cell in
its CellState.
One instance is also created by the cells manager for setting up
the consumers.
"""
BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION
def __init__(self, *args, **kwargs):
super(CellsRPCDriver, self).__init__(*args, **kwargs)
self.rpc_connections = []
self.intercell_rpcapi = InterCellRPCAPI(
self.BASE_RPC_API_VERSION)
def _start_consumer(self, dispatcher, topic):
"""Start an RPC consumer."""
conn = rpc.create_connection(new=True)
conn.create_consumer(topic, dispatcher, fanout=False)
conn.create_consumer(topic, dispatcher, fanout=True)
self.rpc_connections.append(conn)
conn.consume_in_thread()
return conn
def start_consumers(self, msg_runner):
"""Start RPC consumers.
Start up 2 separate consumers for handling inter-cell
communication via RPC. Both handle the same types of
messages, but requests/replies are separated to solve
potential deadlocks. (If we used the same queue for both,
it's possible to exhaust the RPC thread pool while we wait
        for replies... such that we'd never consume a reply.)
"""
topic_base = CONF.cells.rpc_driver_queue_base
proxy_manager = InterCellRPCDispatcher(msg_runner)
dispatcher = rpc_dispatcher.RpcDispatcher([proxy_manager])
for msg_type in msg_runner.get_message_types():
topic = '%s.%s' % (topic_base, msg_type)
self._start_consumer(dispatcher, topic)
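        # Illustrative sketch (assumed values): with the default
        # rpc_driver_queue_base of 'cells.intercell' and a message type such
        # as 'targeted', the loop above ends up consuming on the topic
        # 'cells.intercell.targeted' -- once as a fanout consumer and once
        # as a regular one.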
def stop_consumers(self):
"""Stop RPC consumers.
NOTE: Currently there's no hooks when stopping services
to have managers cleanup, so this is not currently called.
"""
for conn in self.rpc_connections:
conn.close()
def send_message_to_cell(self, cell_state, message):
"""Use the IntercellRPCAPI to send a message to a cell."""
self.intercell_rpcapi.send_message_to_cell(cell_state, message)
class InterCellRPCAPI(rpc_proxy.RpcProxy):
"""Client side of the Cell<->Cell RPC API.
The CellsRPCDriver uses this to make calls to another cell.
API version history:
1.0 - Initial version.
... Grizzly supports message version 1.0. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.0.
"""
VERSION_ALIASES = {
'grizzly': '1.0',
}
def __init__(self, default_version):
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell,
CONF.upgrade_levels.intercell)
super(InterCellRPCAPI, self).__init__(None, default_version,
version_cap=version_cap)
@staticmethod
def _get_server_params_for_cell(next_hop):
"""Turn the DB information for a cell into the parameters
needed for the RPC call.
"""
param_map = {'username': 'username',
'password': 'password',
'rpc_host': 'hostname',
'rpc_port': 'port',
'rpc_virtual_host': 'virtual_host'}
server_params = {}
for source, target in param_map.items():
if next_hop.db_info[source]:
server_params[target] = next_hop.db_info[source]
return server_params
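    # Illustrative sketch (hypothetical db_info values) for the helper above:
    # {'username': 'guest', 'password': 's3cr3t', 'rpc_host': '10.1.2.3',
    #  'rpc_port': 5672, 'rpc_virtual_host': '/'} would be translated into
    # {'username': 'guest', 'password': 's3cr3t', 'hostname': '10.1.2.3',
    #  'port': 5672, 'virtual_host': '/'}.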
def send_message_to_cell(self, cell_state, message):
"""Send a message to another cell by JSON-ifying the message and
making an RPC cast to 'process_message'. If the message says to
fanout, do it. The topic that is used will be
'CONF.rpc_driver_queue_base.<message_type>'.
"""
ctxt = message.ctxt
json_message = message.to_json()
rpc_message = self.make_msg('process_message', message=json_message)
topic_base = CONF.cells.rpc_driver_queue_base
topic = '%s.%s' % (topic_base, message.message_type)
server_params = self._get_server_params_for_cell(cell_state)
if message.fanout:
self.fanout_cast_to_server(ctxt, server_params,
rpc_message, topic=topic)
else:
self.cast_to_server(ctxt, server_params,
rpc_message, topic=topic)
class InterCellRPCDispatcher(object):
"""RPC Dispatcher to handle messages received from other cells.
All messages received here have come from a sibling cell. Depending
on the ultimate target and type of message, we may process the message
in this cell, relay the message to another sibling cell, or both. This
logic is defined by the message class in the messaging module.
"""
BASE_RPC_API_VERSION = _CELL_TO_CELL_RPC_API_VERSION
def __init__(self, msg_runner):
"""Init the Intercell RPC Dispatcher."""
self.msg_runner = msg_runner
def process_message(self, _ctxt, message):
"""We received a message from another cell. Use the MessageRunner
to turn this from JSON back into an instance of the correct
Message class. Then process it!
"""
message = self.msg_runner.message_from_json(message)
message.process()
|
marinho/geraldo | refs/heads/master | site/newsite/site-geraldo/django/contrib/localflavor/sk/forms.py | 344 | """
Slovak-specific form helpers
"""
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class SKRegionSelect(Select):
"""
A select widget widget with list of Slovak regions as choices.
"""
def __init__(self, attrs=None):
from sk_regions import REGION_CHOICES
super(SKRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class SKDistrictSelect(Select):
"""
A select widget with list of Slovak districts as choices.
"""
def __init__(self, attrs=None):
from sk_districts import DISTRICT_CHOICES
super(SKDistrictSelect, self).__init__(attrs, choices=DISTRICT_CHOICES)
class SKPostalCodeField(RegexField):
"""
A form field that validates its input as Slovak postal code.
    Valid forms are XXXXX and XXX XX, where X is a digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XXXXX or XXX XX.'),
}
def __init__(self, *args, **kwargs):
super(SKPostalCodeField, self).__init__(r'^\d{5}$|^\d{3} \d{2}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self, value):
"""
Validates the input and returns a string that contains only numbers.
Returns an empty string for empty values.
"""
v = super(SKPostalCodeField, self).clean(value)
return v.replace(' ', '')
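# Illustrative behaviour sketch (assumed inputs, not part of the original
# module): SKPostalCodeField().clean(u'841 04') and
# SKPostalCodeField().clean(u'84104') both normalize to u'84104', while a
# value such as u'8410' fails the regex validation.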
|
Noviat/odoo | refs/heads/8.0 | addons/hr_attendance/res_config.py | 434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_attendance_config_settings(osv.osv_memory):
_inherit = 'hr.config.settings'
_columns = {
'group_hr_attendance': fields.boolean('Track attendances for all employees',
implied_group='base.group_hr_attendance',
help="Allocates attendance group to all users."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jamesrobertlloyd/automl-phase-2 | refs/heads/master | sandpit_two.py | 1 | __author__ = 'jrl44'
# import sandpit
import time
import global_data
def print_globals(_):
time.sleep(5)
print(globals())
# print(sandpit.__dict__)
# X = my_global
# X = X + 1
# print(X)
time.sleep(5)
def import_and_print_globals(_):
time.sleep(5)
print global_data.__dict__
time.sleep(5)
X = global_data.my_global
X = X + 1
print(X)
time.sleep(5) |
samchrisinger/osf.io | refs/heads/develop | framework/sessions/utils.py | 8 | # -*- coding: utf-8 -*-
from modularodm import Q
from framework.sessions.model import Session
def remove_sessions_for_user(user):
"""
Permanently remove all stored sessions for the user from the DB.
:param user: User
:return:
"""
Session.remove(Q('data.auth_user_id', 'eq', user._id))
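# Illustrative note (an inference, not from the original module): removing
# every Session whose data.auth_user_id matches user._id effectively logs
# the user out of all active sessions at once.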
def remove_session(session):
"""
Remove a session from database
:param session: Session
:return:
"""
Session.remove(Q('_id', 'eq', session._id))
|
t0mk/ansible | refs/heads/devel | lib/ansible/modules/notification/cisco_spark.py | 3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'
}
DOCUMENTATION = '''
---
module: cisco_spark
short_description: Send a message to a Cisco Spark Room or Individual.
description:
- Send a message to a Cisco Spark Room or Individual with options to control the formatting.
version_added: "2.3"
author: Drew Rusell (@drusse11)
notes:
- The C(recipient_id) type must be valid for the supplied C(recipient_id).
- Full API documentation can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
options:
recipient_type:
description:
- The request parameter you would like to send the message to.
- Messages can be sent to either a room or individual (by ID or E-Mail).
required: True
choices: ['roomId', 'toPersonEmail', 'toPersonId']
recipient_id:
description:
- The unique identifier associated with the supplied C(recipient_type).
required: true
message_type:
description:
- Specifies how you would like the message formatted.
required: False
default: text
choices: ['text', 'markdown']
personal_token:
description:
- Your personal access token required to validate the Spark API.
required: true
aliases: ['token']
message:
description:
- The message you would like to send.
required: True
'''
EXAMPLES = """
# Note: The following examples assume a variable file has been imported
# that contains the appropriate information.
- name: Cisco Spark - Markdown Message to a Room
cisco_spark:
recipient_type: roomId
recipient_id: "{{ room_id }}"
message_type: markdown
personal_token: "{{ token }}"
message: "**Cisco Spark Ansible Module - Room Message in Markdown**"
- name: Cisco Spark - Text Message to a Room
cisco_spark:
recipient_type: roomId
recipient_id: "{{ room_id }}"
message_type: text
personal_token: "{{ token }}"
message: "Cisco Spark Ansible Module - Room Message in Text"
- name: Cisco Spark - Text Message by an Individuals ID
cisco_spark:
recipient_type: toPersonId
recipient_id: "{{ person_id}}"
message_type: text
personal_token: "{{ token }}"
message: "Cisco Spark Ansible Module - Text Message to Individual by ID"
- name: Cisco Spark - Text Message by an Individuals E-Mail Address
cisco_spark:
recipient_type: toPersonEmail
recipient_id: "{{ person_email }}"
message_type: text
personal_token: "{{ token }}"
message: "Cisco Spark Ansible Module - Text Message to Individual by E-Mail"
"""
RETURN = """
status_code:
description:
- The Response Code returned by the Spark API.
    - Full Response Code explanations can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
returned: always
type: int
sample: 200
message:
description:
- The Response Message returned by the Spark API.
    - Full Response Code explanations can be found at U(https://developer.ciscospark.com/endpoint-messages-post.html).
returned: always
type: string
sample: OK (585 bytes)
"""
def spark_message(module):
""" When check mode is specified, establish a read only connection, that does not return any user specific
data, to validate connectivity. In regular mode, send a message to a Cisco Spark Room or Individual"""
# Ansible Specific Variables
results = {}
ansible = module.params
headers = {
'Authorization': 'Bearer {}'.format(ansible['personal_token']),
'content-type': 'application/json'
}
if module.check_mode:
url = "https://api.ciscospark.com/v1/people/me"
payload = None
else:
url = "https://api.ciscospark.com/v1/messages"
payload = {
ansible['recipient_type']: ansible['recipient_id'],
ansible['message_type']: ansible['message']
}
payload = module.jsonify(payload)
response, info = fetch_url(module, url, data=payload, headers=headers)
status_code = info['status']
message = info['msg']
# Module will fail if the response is not 200
if status_code != 200:
results['failed'] = True
results['status_code'] = status_code
results['message'] = message
else:
results['failed'] = False
results['status_code'] = status_code
if module.check_mode:
results['message'] = 'Authentication Successful.'
else:
results['message'] = message
return results
def main():
'''Ansible main. '''
module = AnsibleModule(
argument_spec=dict(
recipient_type=dict(required=True, choices=[
'roomId', 'toPersonEmail', 'toPersonId']),
recipient_id=dict(required=True, no_log=True),
message_type=dict(required=False, default=['text'], aliases=[
'type'], choices=['text', 'markdown']),
personal_token=dict(required=True, no_log=True, aliases=['token']),
message=dict(required=True)
),
supports_check_mode=True
)
results = spark_message(module)
module.exit_json(**results)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
if __name__ == "__main__":
main()
|
kba/ocropy | refs/heads/master | ocrolib/chars.py | 11 | # -*- encoding: utf-8 -*-
import re
# common character sets
digits = u"0123456789"
letters = u"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
symbols = ur"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
ascii = digits+letters+symbols
xsymbols = u"""€¢£»«›‹÷©®†‡°∙•◦‣¶§÷¡¿▪▫"""
german = u"ÄäÖöÜüß"
french = u"ÀàÂâÆæÇçÉéÈèÊêËëÎîÏïÔôŒœÙùÛûÜüŸÿ"
turkish = u"ĞğŞşıſ"
greek = u"ΑαΒβΓγΔδΕεΖζΗηΘθΙιΚκΛλΜμΝνΞξΟοΠπΡρΣσςΤτΥυΦφΧχΨψΩω"
default = ascii+xsymbols+german+french
european = default+turkish+greek
# List of regular expressions for normalizing Unicode text.
# Cleans up common homographs. This is mostly used for
# training text.
# Note that the replacement of pretty much all quotes with
# ASCII straight quotes and commas requires some
# postprocessing to figure out which of those symbols
# represent typographic quotes. See `requote`
# TODO: We may want to try to preserve more shape; unfortunately,
# there are lots of inconsistencies between fonts. Generally,
# there seems to be left vs right leaning, and top-heavy vs bottom-heavy
replacements = [
(u'[_~#]',u"~"), # OCR control characters
(u'"',u"''"), # typewriter double quote
(u"`",u"'"), # grave accent
(u'[“”]',u"''"), # fancy quotes
(u"´",u"'"), # acute accent
    (u"[‘’]",u"'"), # curly single quotation marks
    (u"[“”]",u"''"), # curly double quotation marks
(u"“",u"''"), # German quotes
(u"„",u",,"), # German quotes
(u"…",u"..."), # ellipsis
(u"′",u"'"), # prime
(u"″",u"''"), # double prime
(u"‴",u"'''"), # triple prime
(u"〃",u"''"), # ditto mark
(u"µ",u"μ"), # replace micro unit with greek character
(u"[–—]",u"-"), # variant length hyphens
(u"fl",u"fl"), # expand Unicode ligatures
(u"fi",u"fi"),
(u"ff",u"ff"),
(u"ffi",u"ffi"),
(u"ffl",u"ffl"),
]
def requote(s):
s = unicode(s)
s = re.sub(ur"''",u'"',s)
return s
def requote_fancy(s,germanic=0):
s = unicode(s)
if germanic:
# germanic quoting style reverses the shapes
# straight double quotes
s = re.sub(ur"\s+''",u"”",s)
s = re.sub(u"''\s+",u"“",s)
s = re.sub(ur"\s+,,",u"„",s)
# straight single quotes
s = re.sub(ur"\s+'",u"’",s)
s = re.sub(ur"'\s+",u"‘",s)
s = re.sub(ur"\s+,",u"‚",s)
else:
# straight double quotes
s = re.sub(ur"\s+''",u"“",s)
s = re.sub(ur"''\s+",u"”",s)
s = re.sub(ur"\s+,,",u"„",s)
# straight single quotes
s = re.sub(ur"\s+'",u"‘",s)
s = re.sub(ur"'\s+",u"’",s)
s = re.sub(ur"\s+,",u"‚",s)
return s
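# Illustrative behaviour sketch (assumed input, not part of the original
# module): requote(u"she said ''yes''") yields u'she said "yes"', while
# requote_fancy additionally turns whitespace-adjacent straight-quote pairs
# into typographic quotes, with germanic=1 selecting the German-style shapes.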
|
lucafavatella/intellij-community | refs/heads/cli-wip | python/testData/inspections/PyAttributeOutsideInitInspection/fromSuperHierarchy.py | 166 | __author__ = 'ktisha'
class Base(object):
def __init__(self):
self.my = 1
class Child(Base):
def f(self):
self.my = 1 |
jmcarp/pyrobot | refs/heads/master | docs/conf.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath('_themes'))
import pyrobot
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyrobot'
copyright = u'"", Joshua Carp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyrobot.__version__
# The full version, including alpha/beta/rc tags.
release = pyrobot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'kr'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrobotdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyrobot.tex', u'pyrobot Documentation',
u'Joshua Carp', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyrobot', u'pyrobot Documentation',
[u'Joshua Carp'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyrobot', u'pyrobot Documentation',
u'Joshua Carp', 'pyrobot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
fr34k8/atomic-reactor | refs/heads/master | atomic_reactor/source.py | 6 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Code for getting source code to put inside container.
"""
import logging
import copy
import os
import shutil
import tempfile
from atomic_reactor import util
from atomic_reactor.constants import SOURCE_DIRECTORY_NAME
logger = logging.getLogger(__name__)
class Source(object):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
self.provider = provider
self.uri = uri
self.dockerfile_path = dockerfile_path
self.provider_params = provider_params or {}
# TODO: do we want to delete tmpdir when destroying the object?
self.tmpdir = tmpdir or tempfile.mkdtemp()
logger.debug("workdir is %s", repr(self.tmpdir))
self.source_path = os.path.join(self.tmpdir, SOURCE_DIRECTORY_NAME)
logger.debug("source path is %s", repr(self.source_path))
@property
def path(self):
return self.get()
@property
def workdir(self):
return self.tmpdir
def get(self):
"""Run this to get source and save it to `tmpdir` or a newly created tmpdir."""
raise NotImplementedError('Must override in subclasses!')
def get_dockerfile_path(self):
# TODO: will we need figure_out_dockerfile as a separate method?
return util.figure_out_dockerfile(self.path, self.dockerfile_path)
def remove_tmpdir(self):
shutil.rmtree(self.tmpdir)
class GitSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(GitSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
self.git_commit = self.provider_params.get('git_commit', None)
self.lg = util.LazyGit(self.uri, self.git_commit, self.source_path)
def get(self):
return self.lg.git_path
class PathSource(Source):
def __init__(self, provider, uri, dockerfile_path=None, provider_params=None, tmpdir=None):
super(PathSource, self).__init__(provider, uri, dockerfile_path,
provider_params, tmpdir)
# make sure we have canonical URI representation even if we got path without "file://"
if not self.uri.startswith('file://'):
self.uri = 'file://' + self.uri
self.schemeless_path = self.uri[len('file://'):]
os.makedirs(self.source_path)
def get(self):
# work around the weird behaviour of copytree, which requires the top dir
# to *not* exist
for f in os.listdir(self.schemeless_path):
old = os.path.join(self.schemeless_path, f)
new = os.path.join(self.source_path, f)
if os.path.exists(new):
# this is the second invocation of this method; just break the loop
break
else:
if os.path.isdir(old):
shutil.copytree(old, new)
else:
shutil.copy2(old, new)
return self.source_path
def get_source_instance_for(source, tmpdir=None):
validate_source_dict_schema(source)
klass = None
provider = source['provider'].lower()
if provider == 'git':
klass = GitSource
elif provider == 'path':
klass = PathSource
else:
raise ValueError('unknown source provider "{0}"'.format(provider))
# don't modify original source
args = copy.deepcopy(source)
args['tmpdir'] = tmpdir
return klass(**args)
def validate_source_dict_schema(sd):
if not isinstance(sd, dict):
raise ValueError('"source" must be a dict')
for k in ['provider', 'uri']:
if k not in sd:
raise ValueError('"source" must contain "{0}" key'.format(k))
|
ricardogsilva/QGIS | refs/heads/master | cmake/FindSIP.py | 23 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007, Simon Edwards <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Simon Edwards <[email protected]> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Simon Edwards <[email protected]> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Simon Edwards <[email protected]> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# FindSIP.py
# Copyright (c) 2007, Simon Edwards <[email protected]>
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
try:
import sipbuild
print("sip_version:%06.0x" % sipbuild.version.SIP_VERSION)
print("sip_version_num:%d" % sipbuild.version.SIP_VERSION)
print("sip_version_str:%s" % sipbuild.version.SIP_VERSION_STR)
import shutil
sip_bin = shutil.which("sip5")
if sip_bin is None:
raise ImportError("sipbuild found, but not sip5")
print("sip_bin:%s" % sip_bin)
from distutils.sysconfig import get_python_lib
python_modules_dir = get_python_lib(plat_specific=1)
print("default_sip_dir:%s" % python_modules_dir)
except ImportError: # Code for SIP v4
import sipconfig
sipcfg = sipconfig.Configuration()
print("sip_version:%06.0x" % sipcfg.sip_version)
print("sip_version_num:%d" % sipcfg.sip_version)
print("sip_version_str:%s" % sipcfg.sip_version_str)
print("sip_bin:%s" % sipcfg.sip_bin)
print("default_sip_dir:%s" % sipcfg.default_sip_dir)
print("sip_inc_dir:%s" % sipcfg.sip_inc_dir)
# SIP 4.19.10+ has new sipcfg.sip_module_dir
if hasattr(sipcfg, "sip_module_dir"):
print("sip_module_dir:%s" % sipcfg.sip_module_dir)
else:
print("sip_module_dir:%s" % sipcfg.sip_mod_dir)
|
DirkHoffmann/indico | refs/heads/master | indico/modules/categories/views.py | 1 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from markupsafe import escape
from indico.modules.admin.views import WPAdmin
from indico.util.i18n import _
from indico.util.mathjax import MathjaxMixin
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin, render_header
class WPManageUpcomingEvents(WPAdmin):
template_prefix = 'categories/'
class WPCategory(MathjaxMixin, WPJinjaMixin, WPDecorated):
"""WP for category display pages."""
template_prefix = 'categories/'
ALLOW_JSON = False
bundles = ('module_categories.js',)
def __init__(self, rh, category, **kwargs):
kwargs['category'] = category
self.category = category
self.atom_feed_url = kwargs.get('atom_feed_url')
self.atom_feed_title = kwargs.get('atom_feed_title')
if category:
self.title = category.title
WPDecorated.__init__(self, rh, **kwargs)
self._mathjax = kwargs.pop('mathjax', False)
def _get_header(self):
return render_header(category=self.category, protected_object=self.category,
local_tz=self.category.display_tzinfo.zone)
def _get_body(self, params):
return self._get_page_content(params)
def _get_head_content(self):
head_content = WPDecorated._get_head_content(self)
if self.atom_feed_url:
title = self.atom_feed_title or _("Indico Atom feed")
head_content += ('<link rel="alternate" type="application/atom+xml" title="{}" href="{}">'
.format(escape(title), self.atom_feed_url))
if self._mathjax:
head_content += MathjaxMixin._get_head_content(self)
return head_content
def _get_breadcrumbs(self):
if not self.category or self.category.is_root:
return ''
return render_breadcrumbs(category=self.category)
class WPCategoryCalendar(WPCategory):
"""WP for category calendar page."""
bundles = ('module_categories.calendar.js', 'module_categories.calendar.css')
class WPCategoryManagement(WPCategory):
"""WP for category management pages."""
MANAGEMENT = True
bundles = ('module_categories.management.js',)
def __init__(self, rh, category, active_menu_item, **kwargs):
kwargs['active_menu_item'] = active_menu_item
WPCategory.__init__(self, rh, category, **kwargs)
def _get_header(self):
return render_header(category=self.category, protected_object=self.category,
local_tz=self.category.timezone, force_local_tz=True)
def _get_breadcrumbs(self):
if self.category.is_root:
return ''
return render_breadcrumbs(category=self.category, management=True)
class WPCategoryStatistics(WPCategory):
bundles = ('module_categories.css',)
|
namili/blueman | refs/heads/master | blueman/gui/CellRendererPixbufTable.py | 3 | # Copyright (C) 2008 Valmantas Paliksa <walmis at balticum-tv dot lt>
# Copyright (C) 2008 Tadas Dailyda <tadas at dailyda dot com>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import gobject
class CellRendererPixbufTable(gtk.GenericCellRenderer):
__gproperties__ = {
"pixbuffs": (gobject.TYPE_PYOBJECT, "pixbuf", "pixbuf", gobject.PARAM_READWRITE)
}
def __init__(self):
self.__gobject_init__()
self.set_property("yalign", 0.5)
self.set_property("xalign", 0.5)
def do_set_property(self, pspec, value):
setattr(self, pspec.name, value)
def do_get_property(self, pspec):
return getattr(self, pspec.name)
def on_render(self, window, widget, background_area, cell_area, expose_area, flags):
if not self.pixbuffs or self.pixbuffs.cols == 0:
return
pix_rect = gtk.gdk.Rectangle()
pix_rect.x, pix_rect.y, pix_rect.width, pix_rect.height = self.on_get_size(widget, cell_area)
pix_rect.x += cell_area.x
pix_rect.y += cell_area.y
pix_rect.width -= 2 * self.get_property("xpad") + (self.pixbuffs.total_width - self.pixbuffs.size)
pix_rect.height -= 2 * self.get_property("ypad") + (self.pixbuffs.total_height - self.pixbuffs.size)
row = 0
col = 0
for k,v in self.pixbuffs.get().iteritems():
#print rows
if row == self.pixbuffs.rows:
y_space = 0
row=0
col+=1
else:
y_space = self.pixbuffs.spacingy
if col == 0 or col == self.pixbuffs.cols:
x_space = 0
else:
x_space = self.pixbuffs.spacingx
draw_rect = cell_area.intersect(pix_rect)
draw_rect = expose_area.intersect(draw_rect)
if self.pixbuffs.cols > 2:
z = self.pixbuffs.size*(self.pixbuffs.cols-1)
else:
z = 0
h = v.get_height()
w = v.get_width()
#if w > h:
# x =
window.draw_pixbuf(
widget.style.black_gc,
v,
draw_rect.x - pix_rect.x, #source x
draw_rect.y - pix_rect.y, #source y
int(draw_rect.x + self.pixbuffs.size * col + x_space*col + (cell_area.width-self.pixbuffs.total_width) * self.get_property("xalign") + (h - w)/2), #dest x
int(draw_rect.y + self.pixbuffs.size * row + y_space*row + (cell_area.height-self.pixbuffs.total_height) * self.get_property("yalign")), #dest y
-1,
-1,
gtk.gdk.RGB_DITHER_NONE,
0,
0
)
row += 1
def on_get_size(self, widget, cell_area):
if not self.pixbuffs or self.pixbuffs.cols == 0:
return 0, 0, 0, 0
calc_width = self.get_property("xpad") * 2 + self.pixbuffs.size + (self.pixbuffs.total_width - self.pixbuffs.size)
calc_height = self.get_property("ypad") * 2 + self.pixbuffs.size + (self.pixbuffs.total_height - self.pixbuffs.size)
x_offset = 0
y_offset = 0
return x_offset, y_offset, calc_width, calc_height
gobject.type_register(CellRendererPixbufTable)
|
n0max/servo | refs/heads/master | tests/wpt/css-tests/tools/gitignore/gitignore.py | 90 | import itertools
import re
import os
end_space = re.compile(r"([^\\]\s)*$")
def fnmatch_translate(pat, path_name=False):
parts = []
seq = False
i = 0
if pat[0] == "/" or path_name:
parts.append("^")
any_char = "[^/]"
if pat[0] == "/":
pat = pat[1:]
else:
any_char = "."
parts.append("^(?:.*/)?")
while i < len(pat):
c = pat[i]
if c == "\\":
if i < len(pat) - 1:
i += 1
c = pat[i]
parts.append(re.escape(c))
else:
raise ValueError
elif seq:
if c == "]":
seq = False
# First two cases are to deal with the case where / is the only character
# in the sequence but path_name is True so it shouldn't match anything
if parts[-1] == "[":
parts = parts[:-1]
elif parts[-1] == "^" and parts[-2] == "[":
parts = parts[:-2]
else:
parts.append(c)
elif c == "-":
parts.append(c)
elif not (path_name and c == "/"):
parts += re.escape(c)
elif c == "[":
parts.append("[")
if i < len(pat) - 1 and pat[i+1] in ("!", "^"):
parts.append("^")
i += 1
seq = True
elif c == "*":
if i < len(pat) - 1 and pat[i+1] == "*":
parts.append(any_char + "*")
i += 1
if i < len(pat) - 1 and pat[i+1] == "*":
raise ValueError
else:
parts.append(any_char + "*")
elif c == "?":
parts.append(any_char)
else:
parts.append(re.escape(c))
i += 1
if seq:
raise ValueError
parts.append("$")
try:
return re.compile("".join(parts))
except:
raise
def parse_line(line):
line = line.rstrip()
if not line or line[0] == "#":
return
invert = line[0] == "!"
if invert:
line = line[1:]
dir_only = line[-1] == "/"
if dir_only:
line = line[:-1]
return invert, dir_only, fnmatch_translate(line, "/" in line)
class PathFilter(object):
def __init__(self, root, extras=None):
if root:
ignore_path = os.path.join(root, ".gitignore")
else:
ignore_path = None
if not ignore_path and not extras:
self.trivial = True
return
self.trivial = False
self.rules_file = []
self.rules_dir = []
if extras is None:
extras = []
if ignore_path and os.path.exists(ignore_path):
self._read_ignore(ignore_path)
for item in extras:
self._read_line(item)
def _read_ignore(self, ignore_path):
with open(ignore_path) as f:
for line in f:
self._read_line(line)
def _read_line(self, line):
parsed = parse_line(line)
if not parsed:
return
invert, dir_only, regexp = parsed
if dir_only:
self.rules_dir.append((regexp, invert))
else:
self.rules_file.append((regexp, invert))
def __call__(self, path):
if os.path.sep != "/":
path = path.replace(os.path.sep, "/")
if self.trivial:
return True
path_is_dir = path[-1] == "/"
if path_is_dir:
path = path[:-1]
rules = self.rules_dir
else:
rules = self.rules_file
include = True
for regexp, invert in rules:
if not include and invert and regexp.match(path):
include = True
elif include and not invert and regexp.match(path):
include = False
return include
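# Rough usage sketch (illustrative patterns and paths, not from the original
# file): PathFilter returns True for paths that are kept and False for paths
# matched by an ignore rule.
if __name__ == "__main__":
    path_filter = PathFilter(None, extras=["*.pyc", "build/"])
    for candidate in ["tools/lint.py", "tools/lint.pyc", "build/"]:
        print("%s -> %s" % (candidate, path_filter(candidate)))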
|
gladk/trunk | refs/heads/master | scripts/checks-and-tests/collider-perf/perf.py | 8 |
utils.readParamsFromTable(nSpheres=8000,collider='InsertionSortCollider',noTableOk=True)
# name of file containing sphere packing with given number of spheres
spheresFile="packing-%dk.spheres"%(nSpheres/1000)
fast='@stride' in collider
import os
if not os.path.exists(spheresFile):
print "Generating packing"
p=TriaxialTest(numberOfGrains=nSpheres,radiusMean=1e-3,lowerCorner=[0,0,0],upperCorner=[1,1,1],noFiles=True)
p.load()
utils.spheresToFile(spheresFile)
O.reset()
print "Packing %s done"%spheresFile
else: print "Packing found (%s), using it."%spheresFile
from yade import timing
O.timingEnabled=True
TriaxialTest(importFilename=spheresFile,fast=fast,noFiles=True).load()
O.dt=utils.PWaveTimeStep()
isc=O.engines[2]
isc.sweepLength=1e-1
if not fast: utils.replaceCollider(eval(collider))
O.step()
timing.stats()
timing.reset()
O.run(200,True)
timing.stats()
quit()
|
HyperBaton/ansible | refs/heads/devel | lib/ansible/modules/network/fortios/fortios_firewall_vipgrp.py | 7 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vipgrp
short_description: Configure IPv4 virtual IP groups in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and vipgrp category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_vipgrp:
description:
- Configure IPv4 virtual IP groups.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Integer value to determine the color of the icon in the GUI (range 1 to 32).
type: int
comments:
description:
- Comment.
type: str
interface:
description:
- interface Source system.interface.name.
type: str
member:
description:
- Member VIP objects of the group (Separate multiple objects with a space).
type: list
suboptions:
name:
description:
- VIP name. Source firewall.vip.name.
required: true
type: str
name:
description:
- VIP group name.
required: true
type: str
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPv4 virtual IP groups.
fortios_firewall_vipgrp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_vipgrp:
color: "3"
comments: "<your_own_value>"
interface: "<your_own_value> (source system.interface.name)"
member:
-
name: "default_name_7 (source firewall.vip.name)"
name: "default_name_8"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_vipgrp_data(json):
option_list = ['color', 'comments', 'interface',
'member', 'name', 'uuid']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_vipgrp(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_vipgrp'] and data['firewall_vipgrp']:
state = data['firewall_vipgrp']['state']
else:
state = True
firewall_vipgrp_data = data['firewall_vipgrp']
filtered_data = underscore_to_hyphen(filter_firewall_vipgrp_data(firewall_vipgrp_data))
if state == "present":
return fos.set('firewall',
'vipgrp',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'vipgrp',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_vipgrp']:
resp = firewall_vipgrp(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_vipgrp": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"color": {"required": False, "type": "int"},
"comments": {"required": False, "type": "str"},
"interface": {"required": False, "type": "str"},
"member": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
ColinIanKing/autotest | refs/heads/master | client/shared/ElementPath.py | 189 | #
# ElementTree
# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
xpath_tokenizer = re.compile(
"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
).findall
class xpath_descendant_or_self:
pass
##
# Wrapper for a compiled XPath.
class Path:
##
# Create an Path instance from an XPath expression.
def __init__(self, path):
tokens = xpath_tokenizer(path)
# the current version supports 'path/path'-style expressions only
self.path = []
self.tag = None
if tokens and tokens[0][0] == "/":
raise SyntaxError("cannot use absolute path on element")
while tokens:
op, tag = tokens.pop(0)
if tag or op == "*":
self.path.append(tag or op)
elif op == ".":
pass
elif op == "/":
self.path.append(xpath_descendant_or_self())
continue
else:
raise SyntaxError("unsupported path syntax (%s)" % op)
if tokens:
op, tag = tokens.pop(0)
if op != "/":
raise SyntaxError(
"expected path separator (%s)" % (op or tag)
)
if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
raise SyntaxError("path cannot end with //")
if len(self.path) == 1 and isinstance(self.path[0], type("")):
self.tag = self.path[0]
##
# Find first matching object.
def find(self, element):
tag = self.tag
if tag is None:
nodeset = self.findall(element)
if not nodeset:
return None
return nodeset[0]
for elem in element:
if elem.tag == tag:
return elem
return None
##
# Find text for first matching object.
def findtext(self, element, default=None):
tag = self.tag
if tag is None:
nodeset = self.findall(element)
if not nodeset:
return default
return nodeset[0].text or ""
for elem in element:
if elem.tag == tag:
return elem.text or ""
return default
##
# Find all matching objects.
def findall(self, element):
nodeset = [element]
index = 0
while 1:
try:
path = self.path[index]
index = index + 1
except IndexError:
return nodeset
set = []
if isinstance(path, xpath_descendant_or_self):
try:
tag = self.path[index]
if not isinstance(tag, type("")):
tag = None
else:
index = index + 1
except IndexError:
tag = None # invalid path
for node in nodeset:
new = list(node.getiterator(tag))
if new and new[0] is node:
set.extend(new[1:])
else:
set.extend(new)
else:
for node in nodeset:
for node in node:
if path == "*" or node.tag == path:
set.append(node)
if not set:
return []
nodeset = set
_cache = {}
##
# (Internal) Compile path.
def _compile(path):
p = _cache.get(path)
if p is not None:
return p
p = Path(path)
if len(_cache) >= 100:
_cache.clear()
_cache[path] = p
return p
##
# Find first matching object.
def find(element, path):
return _compile(path).find(element)
##
# Find text for first matching object.
def findtext(element, path, default=None):
return _compile(path).findtext(element, default)
##
# Find all matching objects.
def findall(element, path):
return _compile(path).findall(element)
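# Minimal demonstration (added for illustration; ElementTree normally calls
# these helpers for you via Element.find/findall/findtext):
if __name__ == "__main__":
    from xml.etree.ElementTree import XML
    root = XML("<doc><item>a</item><sub><item>b</item></sub></doc>")
    print(findtext(root, "item"))                    # text of first match
    print([e.text for e in findall(root, "sub/item")])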
|
sharma1nitish/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/test/generator-output/gyptest-top-all.py | 151 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a project hierarchy created when the --generator-output=
option is used to put the build configuration files in a separate
directory tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.writable(test.workpath('src'), False)
test.run_gyp('prog1.gyp',
'-Dset_symroot=1',
'--generator-output=' + test.workpath('gypfiles'),
chdir='src')
test.writable(test.workpath('src/build'), True)
test.writable(test.workpath('src/subdir2/build'), True)
test.writable(test.workpath('src/subdir3/build'), True)
test.build('prog1.gyp', test.ALL, chdir='gypfiles')
chdir = 'gypfiles'
expect = """\
Hello from %s
Hello from inc.h
Hello from inc1/include1.h
Hello from inc2/include2.h
Hello from inc3/include3.h
Hello from subdir2/deeper/deeper.h
"""
if test.format == 'xcode':
chdir = 'src'
test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
if test.format == 'xcode':
chdir = 'src/subdir2'
test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
if test.format == 'xcode':
chdir = 'src/subdir3'
test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
test.pass_test()
|
katiewsimon/JP-Morgan-Hackathon-Project | refs/heads/master | jp_server/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 111 | import distutils.command.install_scripts as orig
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
from setuptools.command.easy_install import get_script_args
from setuptools.command.easy_install import sys_executable
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0o777-mask)
|
pscholz/presto | refs/heads/master | python/ffdot_example.py | 2 | import numpy as num
import presto
import ppgplot
from Pgplot import pgpalette
N = 2**14
r = N/4.0 # average freq over "observation"
#r = N/4.0 + 0.5 # average freq over "observation"
rint = num.floor(r)
dr = 1.0/32.0
dz = 0.18
np = 512 # number of pixels across for f-fdot image
z = 0.0 # average fourier f-dot
w = 0.0 # fourier freq double deriv
noise = 0.0
#noise = 4*num.random.standard_normal(N)
us = num.arange(N, dtype=num.float64) / N # normalized time coordinate
r0 = r - 0.5 * z + w / 12.0 # Make symmetric for all z and w
z0 = z - 0.5 * w
phss = 2.0 * num.pi * (us * (us * (us * w/6.0 + z0/2.0) + r0))
ft = presto.rfft(num.cos(phss)+noise)
ffdot = presto.ffdot_plane(ft, rint-np/2*dr, dr, np, 0.0-np/2*dz, dz, np)
pffdot = presto.spectralpower(ffdot.flat)
theo_max_pow = N**2.0/4.0
frp = max(pffdot) / theo_max_pow # Fraction of recovered power
print "Fraction of recovered signal power = %f" % frp
# print "Raw power should be ~%.2e" % theo_max_pow
pffdot = pffdot / theo_max_pow
pffdot.shape = (np, np)
rs = num.arange(np) * dr - np/2*dr
zs = num.arange(np) * dz - np/2*dz
rgx = num.asarray([rs[0], rs[np-1]])
rgy = num.asarray([zs[0], zs[np-1]])
freqcut = pffdot[np/2, :]
fdotcut = pffdot[:, np/2]
image='antirainbow'
device='ffdot_combined.eps/VCPS'
device='/XWIN'
labx='Fourier Frequency Offset (bins)'
laby='Fourier Frequency Derivative (bins)'
contours = num.asarray([0.1, 0.3, 0.5, 0.7, 0.9])
imfract = 0.65
margin = 0.08
ppgplot.pgopen(device)
ppgplot.pgpap(0.0, 1.0)
ppgplot.pgpage()
# Give z and w values and power change
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(0.0, 1.0, 0.0, 1.0)
ppgplot.pgtext(0.1, 0.8, "Frac Recovered" % frp)
ppgplot.pgtext(0.2, 0.65, "Power = %.3f" % frp)
ppgplot.pgtext(0.1, 0.4, "signal z = %.1f" % z)
ppgplot.pgtext(0.1, 0.25, "signal w = %.1f" % w)
# freq cut
ppgplot.pgsvp(margin, margin+imfract, margin+imfract, 1.0-margin/2)
ppgplot.pgswin(min(rs), max(rs), -0.1, 1.1)
ppgplot.pgbox("BCST", 0.0, 0, "BCNST", 0.0, 0)
ppgplot.pgline(rs, freqcut)
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, "Relative Power");
#fdot cut
ppgplot.pgsvp(margin+imfract, 1.0-margin/2, margin, margin+imfract)
ppgplot.pgswin(-0.1, 1.1, min(zs), max(zs))
ppgplot.pgbox("BCNST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgline(fdotcut, zs)
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, "Relative Power");
# f-fdot image
ppgplot.pgsvp(margin, margin+imfract, margin, margin+imfract)
ppgplot.pgswin(min(rs), max(rs), min(zs), max(zs))
ppgplot.pgmtxt("B", 2.4, 0.5, 0.5, labx);
ppgplot.pgmtxt("L", 2.0, 0.5, 0.5, laby);
lo_col_ind, hi_col_ind = ppgplot.pgqcol()
lo_col_ind = lo_col_ind + 2
ppgplot.pgscir(lo_col_ind, hi_col_ind)
pgpalette.setpalette(image)
ppgplot.pgctab(pgpalette.l, pgpalette.r, pgpalette.g, pgpalette.b)
ppgplot.pgimag_s(pffdot, 0.0, 0.0, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgsci(1)
ppgplot.pgcont_s(pffdot, len(contours), contours, rgx[0], rgy[0], rgx[1], rgy[1])
ppgplot.pgbox("BCST", 0.0, 0, "BCST", 0.0, 0)
ppgplot.pgsci(1)
ppgplot.pgbox("N", 0.0, 0, "N", 0.0, 0)
# gray axes
ppgplot.pgscr(1, 0.5, 0.5, 0.5)
ppgplot.pgsci(1)
ppgplot.pgslw(2)
ppgplot.pgline(rgx, num.asarray([0.0, 0.0]))
ppgplot.pgline(num.asarray([0.0, 0.0]), rgy)
ppgplot.pgclos()
|
cruzegoodin/TSC-ShippingDetails | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/lang/snowball/dutch.py | 96 | from .bases import _StandardStemmer
from whoosh.compat import u
class DutchStemmer(_StandardStemmer):
"""
The Dutch Snowball stemmer.
:cvar __vowels: The Dutch vowels.
:type __vowels: unicode
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
:type __step3b_suffixes: tuple
:note: A detailed description of the Dutch
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/dutch/stemmer.html
"""
__vowels = u("aeiouy\xE8")
__step1_suffixes = ("heden", "ene", "en", "se", "s")
__step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")
def stem(self, word):
"""
Stem a Dutch word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step2_success = False
# Vowel accents are removed.
word = (word.replace(u("\xE4"), "a").replace(u("\xE1"), "a")
.replace(u("\xEB"), "e").replace(u("\xE9"), "e")
.replace(u("\xED"), "i").replace(u("\xEF"), "i")
.replace(u("\xF6"), "o").replace(u("\xF3"), "o")
.replace(u("\xFC"), "u").replace(u("\xFA"), "u"))
# An initial 'y', a 'y' after a vowel,
# and an 'i' between self.__vowels is put into upper case.
# As from now these are treated as consonants.
if word.startswith("y"):
word = "".join(("Y", word[1:]))
for i in range(1, len(word)):
if word[i - 1] in self.__vowels and word[i] == "y":
word = "".join((word[:i], "Y", word[i + 1:]))
for i in range(1, len(word) - 1):
if (word[i - 1] in self.__vowels and word[i] == "i" and
word[i + 1] in self.__vowels):
word = "".join((word[:i], "I", word[i + 1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i - 1] in self.__vowels:
if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
r1 = word[3:]
elif len(word[:i + 1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix == "heden":
word = "".join((word[:-5], "heid"))
r1 = "".join((r1[:-5], "heid"))
if r2.endswith("heden"):
r2 = "".join((r2[:-5], "heid"))
elif (suffix in ("ene", "en") and
not word.endswith("heden") and
word[-len(suffix) - 1] not in self.__vowels and
word[-len(suffix) - 3:-len(suffix)] != "gem"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
elif (suffix in ("se", "s") and
word[-len(suffix) - 1] not in self.__vowels and
word[-len(suffix) - 1] != "j"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
if r1.endswith("e") and word[-2] not in self.__vowels:
step2_success = True
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3a
if r2.endswith("heid") and word[-5] != "c":
word = word[:-4]
r1 = r1[:-4]
r2 = r2[:-4]
if (r1.endswith("en") and word[-3] not in self.__vowels and
word[-5:-2] != "gem"):
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
# STEP 3b: Derivational suffixes
for suffix in self.__step3b_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ing"):
word = word[:-3]
r2 = r2[:-3]
if r2.endswith("ig") and word[-3] != "e":
word = word[:-2]
else:
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "ig" and word[-3] != "e":
word = word[:-2]
elif suffix == "lijk":
word = word[:-4]
r1 = r1[:-4]
if r1.endswith("e") and word[-2] not in self.__vowels:
word = word[:-1]
if word.endswith(("kk", "dd", "tt")):
word = word[:-1]
elif suffix == "baar":
word = word[:-4]
elif suffix == "bar" and step2_success:
word = word[:-3]
break
# STEP 4: Undouble vowel
if len(word) >= 4:
if word[-1] not in self.__vowels and word[-1] != "I":
if word[-3:-1] in ("aa", "ee", "oo", "uu"):
if word[-4] not in self.__vowels:
word = "".join((word[:-3], word[-3], word[-1]))
# All occurrences of 'I' and 'Y' are put back into lower case.
word = word.replace("I", "i").replace("Y", "y")
return word
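# Example (illustrative sketch; the module uses relative imports, so use it
# through the installed whoosh package rather than running this file directly):
#
#     >>> from whoosh.lang.snowball.dutch import DutchStemmer
#     >>> stemmer = DutchStemmer()
#     >>> stemmer.stem("lichamelijke")  # doctest: +SKIP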
|
Froggiewalker/geonode | refs/heads/master | geonode/contrib/geosites/utils.py | 21 | import os
import shutil
from django.contrib.sites.models import Site
from django.conf import settings
from .models import SiteResources, SitePeople
def resources_for_site():
return SiteResources.objects.get(site=Site.objects.get_current()).resources.all()
def users_for_site():
return SitePeople.objects.get(site=Site.objects.get_current()).people.all()
def sed(filename, change_dict):
""" Update file replacing key with value in provided dictionary """
f = open(filename, 'r')
data = f.read()
f.close()
for key, val in change_dict.items():
data = data.replace(key, val)
f = open(filename, 'w')
f.write(data)
f.close()
def dump_model(model, filename):
from django.core import serializers
data = serializers.serialize("json", model.objects.all(), indent=4)
f = open(filename, "w")
f.write(data)
f.close()
def add_site(name, domain):
""" Add a site to database, create directory tree """
# get latest SITE id
sites = Site.objects.all()
used_ids = [v[0] for v in sites.values_list()]
site_id = max(used_ids) + 1
# current settings is one of the sites
project_dir = os.path.realpath(os.path.join(settings.SITE_ROOT, '../'))
site_dir = os.path.join(project_dir, 'site%s' % site_id)
site_template = os.path.join(os.path.dirname(__file__), 'site_template')
shutil.copytree(site_template, site_dir)
# update configuration and settings files
change_dict = {
'$SITE_ID': str(site_id),
'$SITE_NAME': name,
'$DOMAIN': domain,
'$SITE_ROOT': site_dir,
'$SERVE_PATH': settings.SERVE_PATH,
'$PORTNUM': '8%s' % str(site_id).zfill(3),
'$GEOSERVER_URL': settings.GEOSERVER_URL,
'$PROJECT_NAME': os.path.basename(os.path.dirname(settings.PROJECT_ROOT)),
}
sed(os.path.join(site_dir, 'conf/gunicorn'), change_dict)
sed(os.path.join(site_dir, 'conf/nginx'), change_dict)
sed(os.path.join(site_dir, 'settings.py'), change_dict)
sed(os.path.join(site_dir, 'local_settings_template.py'), change_dict)
sed(os.path.join(site_dir, 'wsgi.py'), change_dict)
# add site to database
site = Site(id=site_id, name=name, domain=domain)
site.save()
dump_model(Site, os.path.join(project_dir, 'sites.json'))
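# Illustrative call (assumes a GeoNode project laid out as expected by the
# template handling above; the name and domain are made up):
#
#     add_site('Demo Site', 'demo.example.com')
#
# which copies site_template into a new site<id> directory, rewrites the
# $SITE_ID/$DOMAIN style placeholders and saves the new Site row.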
|
bitcity/django | refs/heads/master | tests/migrations/migrations_test_apps/conflicting_app_with_dependencies/migrations/__init__.py | 12133432 | |
BondAnthony/ansible | refs/heads/devel | test/integration/targets/module_utils/module_utils/spam2/__init__.py | 12133432 | |
frankvdp/django | refs/heads/master | django/conf/locale/sr/__init__.py | 12133432 | |
daenamkim/ansible | refs/heads/devel | test/units/modules/packaging/language/__init__.py | 12133432 | |
banmoy/ns3 | refs/heads/master | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/c_osx.py | 10 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,shutil,platform
from waflib import Task,Utils,Errors
from waflib.TaskGen import taskgen_method,feature,after_method,before_method
app_info='''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>{app_name}</string>
</dict>
</plist>
'''
@feature('c','cxx')
def set_macosx_deployment_target(self):
if self.env['MACOSX_DEPLOYMENT_TARGET']:
os.environ['MACOSX_DEPLOYMENT_TARGET']=self.env['MACOSX_DEPLOYMENT_TARGET']
elif'MACOSX_DEPLOYMENT_TARGET'not in os.environ:
if Utils.unversioned_sys_platform()=='darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET']='.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self,name,out):
dir=out.parent.find_or_declare(name)
dir.mkdir()
macos=dir.find_or_declare(['Contents','MacOS'])
macos.mkdir()
return dir
def bundle_name_for_output(out):
name=out.name
k=name.rfind('.')
if k>=0:
name=name[:k]+'.app'
else:
name=name+'.app'
return name
@feature('cprogram','cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
out=self.link_task.outputs[0]
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','MacOS',out.name])
self.apptask=self.create_task('macapp',self.link_task.outputs,n1)
inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/MacOS/'%name
self.bld.install_files(inst_to,n1,chmod=Utils.O755)
if getattr(self,'mac_files',None):
mac_files_root=getattr(self,'mac_files_root',None)
if isinstance(mac_files_root,str):
mac_files_root=self.path.find_node(mac_files_root)
if not mac_files_root:
self.bld.fatal('Invalid mac_files_root %r'%self.mac_files_root)
res_dir=n1.parent.parent.make_node('Resources')
inst_to=getattr(self,'install_path','/Applications')+'/%s/Resources'%name
for node in self.to_nodes(self.mac_files):
relpath=node.path_from(mac_files_root or node.parent)
self.create_task('macapp',node,res_dir.make_node(relpath))
self.bld.install_as(os.path.join(inst_to,relpath),node)
if getattr(self,'mac_resources',None):
res_dir=n1.parent.parent.make_node('Resources')
inst_to=getattr(self,'install_path','/Applications')+'/%s/Resources'%name
for x in self.to_list(self.mac_resources):
node=self.path.find_node(x)
if not node:
raise Errors.WafError('Missing mac_resource %r in %r'%(x,self))
parent=node.parent
if os.path.isdir(node.abspath()):
nodes=node.ant_glob('**')
else:
nodes=[node]
for node in nodes:
rel=node.path_from(parent)
self.create_task('macapp',node,res_dir.make_node(rel))
self.bld.install_as(inst_to+'/%s'%rel,node)
if getattr(self.bld,'is_install',None):
self.install_task.hasrun=Task.SKIP_ME
@feature('cprogram','cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
if self.env['MACAPP']or getattr(self,'mac_app',False):
out=self.link_task.outputs[0]
name=bundle_name_for_output(out)
dir=self.create_bundle_dirs(name,out)
n1=dir.find_or_declare(['Contents','Info.plist'])
self.plisttask=plisttask=self.create_task('macplist',[],n1)
plisttask.context={'app_name':self.link_task.outputs[0].name,'env':self.env}
plist_ctx=getattr(self,'plist_context',None)
if(plist_ctx):
plisttask.context.update(plist_ctx)
if getattr(self,'mac_plist',False):
node=self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code=self.mac_plist
else:
plisttask.code=app_info
inst_to=getattr(self,'install_path','/Applications')+'/%s/Contents/'%name
self.bld.install_files(inst_to,n1)
@feature('cshlib','cxxshlib')
@before_method('apply_link','propagate_uselib_vars')
def apply_bundle(self):
if self.env['MACBUNDLE']or getattr(self,'mac_bundle',False):
self.env['LINKFLAGS_cshlib']=self.env['LINKFLAGS_cxxshlib']=[]
self.env['cshlib_PATTERN']=self.env['cxxshlib_PATTERN']=self.env['macbundle_PATTERN']
use=self.use=self.to_list(getattr(self,'use',[]))
if not'MACBUNDLE'in use:
use.append('MACBUNDLE')
app_dirs=['Contents','Contents/MacOS','Contents/Resources']
class macapp(Task.Task):
color='PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(),self.outputs[0].abspath())
class macplist(Task.Task):
color='PINK'
ext_in=['.bin']
def run(self):
if getattr(self,'code',None):
txt=self.code
else:
txt=self.inputs[0].read()
context=getattr(self,'context',{})
txt=txt.format(**context)
self.outputs[0].write(txt)
|
sakuramochi0/ticketcamp-scalping | refs/heads/master | ticketcamp_scalping/settings.py | 1 | # -*- coding: utf-8 -*-
# Scrapy settings for ticketcamp_scalping project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'ticketcamp_scalping'
SPIDER_MODULES = ['ticketcamp_scalping.spiders']
NEWSPIDER_MODULE = 'ticketcamp_scalping.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ticketcamp_scalping (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'ticketcamp_scalping.middlewares.TicketcampScalpingSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'ticketcamp_scalping.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'ticketcamp_scalping.pipelines.TicketcampScalpingPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
forevernull/incubator-airflow | refs/heads/master | airflow/utils/db.py | 13 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from functools import wraps
import logging
import os
from alembic.config import Config
from alembic import command
from alembic.migration import MigrationContext
from sqlalchemy import event, exc
from sqlalchemy.pool import Pool
from airflow import settings
from airflow import configuration
def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
needs_session = False
arg_session = 'session'
func_params = func.__code__.co_varnames
session_in_args = arg_session in func_params and \
func_params.index(arg_session) < len(args)
if not (arg_session in kwargs or session_in_args):
needs_session = True
session = settings.Session()
kwargs[arg_session] = session
result = func(*args, **kwargs)
if needs_session:
session.expunge_all()
session.commit()
session.close()
return result
return wrapper
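# Sketch of the decorator in use (hypothetical helper, added for illustration
# only):
#
#     @provide_session
#     def count_connections(session=None):
#         from airflow import models
#         return session.query(models.Connection).count()
#
# Called without a session, the wrapper opens one, commits and closes it;
# called with session=..., the caller keeps control of the transaction.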
def pessimistic_connection_handling():
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
'''
Disconnect Handling - Pessimistic, taken from:
http://docs.sqlalchemy.org/en/rel_0_9/core/pooling.html
'''
cursor = dbapi_connection.cursor()
try:
cursor.execute("SELECT 1")
except:
raise exc.DisconnectionError()
cursor.close()
@provide_session
def merge_conn(conn, session=None):
from airflow import models
C = models.Connection
if not session.query(C).filter(C.conn_id == conn.conn_id).first():
session.add(conn)
session.commit()
@event.listens_for(settings.engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
@event.listens_for(settings.engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid {}, "
"attempting to check out in pid {}".format(connection_record.info['pid'], pid)
)
def initdb():
session = settings.Session()
from airflow import models
upgradedb()
merge_conn(
models.Connection(
conn_id='airflow_db', conn_type='mysql',
host='localhost', login='root', password='',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='airflow_ci', conn_type='mysql',
host='localhost', login='root',
schema='airflow_ci'))
merge_conn(
models.Connection(
conn_id='beeline_default', conn_type='beeline', port="10000",
host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
schema='default'))
merge_conn(
models.Connection(
conn_id='bigquery_default', conn_type='bigquery'))
merge_conn(
models.Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
merge_conn(
models.Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
merge_conn(
models.Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
merge_conn(
models.Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
port=9083))
merge_conn(
models.Connection(
conn_id='mysql_default', conn_type='mysql',
login='root',
host='localhost'))
merge_conn(
models.Connection(
conn_id='postgres_default', conn_type='postgres',
login='postgres',
schema='airflow',
host='localhost'))
merge_conn(
models.Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='/tmp/sqlite_default.db'))
merge_conn(
models.Connection(
conn_id='http_default', conn_type='http',
host='https://www.google.com/'))
merge_conn(
models.Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
merge_conn(
models.Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
merge_conn(
models.Connection(
conn_id='webhdfs_default', conn_type='hdfs',
host='localhost', port=50070))
merge_conn(
models.Connection(
conn_id='ssh_default', conn_type='ssh',
host='localhost'))
merge_conn(
models.Connection(
conn_id='fs_default', conn_type='fs',
extra='{"path": "/"}'))
merge_conn(
models.Connection(
conn_id='aws_default', conn_type='aws',
extra='{"region_name": "us-east-1"}'))
merge_conn(
models.Connection(
conn_id='emr_default', conn_type='emr',
extra='''
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
]
},
"Ec2KeyName": "mykey",
"KeepJobFlowAliveWhenNoSteps": false,
"TerminationProtected": false,
"Ec2SubnetId": "somesubnet",
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
'''))
# Known event types
KET = models.KnownEventType
if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
session.add(KET(know_event_type='Holiday'))
if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
session.add(KET(know_event_type='Outage'))
if not session.query(KET).filter(
KET.know_event_type == 'Natural Disaster').first():
session.add(KET(know_event_type='Natural Disaster'))
if not session.query(KET).filter(
KET.know_event_type == 'Marketing Campaign').first():
session.add(KET(know_event_type='Marketing Campaign'))
session.commit()
dagbag = models.DagBag()
# Save individual DAGs in the ORM
now = datetime.utcnow()
for dag in dagbag.dags.values():
models.DAG.sync_to_db(dag, dag.owner, now)
# Deactivate the unknown ones
models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
Chart = models.Chart
chart_label = "Airflow task instance by type"
chart = session.query(Chart).filter(Chart.label == chart_label).first()
if not chart:
chart = Chart(
label=chart_label,
conn_id='airflow_db',
chart_type='bar',
x_is_date=False,
sql=(
"SELECT state, COUNT(1) as number "
"FROM task_instance "
"WHERE dag_id LIKE 'example%' "
"GROUP BY state"),
)
session.add(chart)
session.commit()
def upgradedb():
logging.info("Creating tables")
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory)
config.set_main_option('sqlalchemy.url',
configuration.get('core', 'SQL_ALCHEMY_CONN'))
command.upgrade(config, 'heads')
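# Illustrative CLI equivalent of upgradedb() (sketch only; assumes alembic is
# installed and that script_location / sqlalchemy.url are valid in the ini file,
# which the function above instead sets programmatically):
#
#   alembic -c <airflow_package_dir>/alembic.ini upgrade heads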
def resetdb():
'''
Clear out the database
'''
from airflow import models
logging.info("Dropping tables that exist")
models.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
initdb()
|
Lujeni/ansible | refs/heads/devel | lib/ansible/modules/network/onyx/onyx_qos.py | 28 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_qos
version_added: "2.9"
author: "Anas Badaha (@anasb)"
short_description: Configures QoS
description:
- This module provides declarative management of Onyx QoS configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
interfaces:
description:
      - list of interface names.
required: true
trust:
description:
- trust type.
choices: ['L2', 'L3', 'both']
default: L2
rewrite_pcp:
description:
- rewrite with type pcp.
choices: ['enabled', 'disabled']
default: disabled
rewrite_dscp:
description:
- rewrite with type dscp.
choices: ['enabled', 'disabled']
default: disabled
"""
EXAMPLES = """
- name: configure QoS
  onyx_qos:
interfaces:
- Mpo7
- Mpo7
trust: L3
rewrite_pcp: disabled
rewrite_dscp: enabled
- name: configure QoS
  onyx_qos:
interfaces:
- Eth1/1
- Eth1/2
trust: both
rewrite_pcp: disabled
rewrite_dscp: enabled
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- interface ethernet 1/16 qos trust L3
- interface mlag-port-channel 7 qos trust L3
- interface port-channel 1 qos trust L3
- interface mlag-port-channel 7 qos trust L2
- interface mlag-port-channel 7 qos rewrite dscp
- interface ethernet 1/16 qos rewrite pcp
- interface ethernet 1/1 no qos rewrite pcp
"""
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxQosModule(BaseOnyxModule):
TRUST_CMD = "interface {0} {1} qos trust {2}"
NO_REWRITE_PCP_CMD = "interface {0} {1} no qos rewrite pcp"
NO_REWRITE_DSCP_CMD = "interface {0} {1} no qos rewrite dscp"
REWRITE_PCP_CMD = "interface {0} {1} qos rewrite pcp"
REWRITE_DSCP_CMD = "interface {0} {1} qos rewrite dscp"
REWRITE_PCP = "pcp"
REWRITE_DSCP = "dscp"
IF_ETH_REGEX = re.compile(r"^Eth(\d+\/\d+|Eth\d+\/\d+\d+)$")
IF_PO_REGEX = re.compile(r"^Po(\d+)$")
MLAG_NAME_REGEX = re.compile(r"^Mpo(\d+)$")
IF_TYPE_ETH = "ethernet"
PORT_CHANNEL = "port-channel"
MLAG_PORT_CHANNEL = "mlag-port-channel"
IF_TYPE_MAP = {
IF_TYPE_ETH: IF_ETH_REGEX,
PORT_CHANNEL: IF_PO_REGEX,
MLAG_PORT_CHANNEL: MLAG_NAME_REGEX
}
def init_module(self):
""" initialize module
"""
element_spec = dict(
interfaces=dict(type='list', required=True),
trust=dict(choices=['L2', 'L3', 'both'], default='L2'),
rewrite_pcp=dict(choices=['enabled', 'disabled'], default='disabled'),
rewrite_dscp=dict(choices=['enabled', 'disabled'], default='disabled')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def _get_interface_type(self, if_name):
if_type = None
if_id = None
for interface_type, interface_regex in iteritems(self.IF_TYPE_MAP):
match = interface_regex.match(if_name)
if match:
if_type = interface_type
if_id = match.group(1)
break
return if_type, if_id
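    # Illustrative mapping produced by _get_interface_type() given the regexes
    # above (example inputs only, not taken from the original module):
    #   "Eth1/16" -> ("ethernet", "1/16")
    #   "Po10"    -> ("port-channel", "10")
    #   "Mpo7"    -> ("mlag-port-channel", "7")
    #   "foo0"    -> (None, None)   # unsupported name; caller fails the module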
def _set_interface_qos_config(self, interface_qos_config, interface, if_type, if_id):
interface_qos_config = interface_qos_config[0].get(interface)
trust = interface_qos_config[0].get("Trust mode")
rewrite_dscp = interface_qos_config[0].get("DSCP rewrite")
rewrite_pcp = interface_qos_config[0].get("PCP,DEI rewrite")
self._current_config[interface] = dict(trust=trust, rewrite_dscp=rewrite_dscp,
rewrite_pcp=rewrite_pcp, if_type=if_type, if_id=if_id)
def _show_interface_qos(self, if_type, interface):
cmd = "show qos interface {0} {1}".format(if_type, interface)
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
for interface in self._required_config.get("interfaces"):
if_type, if_id = self._get_interface_type(interface)
if not if_id:
self._module.fail_json(
msg='unsupported interface: {0}'.format(interface))
interface_qos_config = self._show_interface_qos(if_type, if_id)
if interface_qos_config is not None:
self._set_interface_qos_config(interface_qos_config, interface, if_type, if_id)
else:
self._module.fail_json(
msg='Interface {0} does not exist on switch'.format(interface))
def generate_commands(self):
trust = self._required_config.get("trust")
rewrite_pcp = self._required_config.get("rewrite_pcp")
rewrite_dscp = self._required_config.get("rewrite_dscp")
for interface in self._required_config.get("interfaces"):
ignored1, ignored2, current_trust, if_type, if_id = self._get_current_rewrite_config(interface)
self._add_interface_trust_cmds(if_type, if_id, interface, trust, current_trust)
self._add_interface_rewrite_cmds(if_type, if_id, interface,
rewrite_pcp, rewrite_dscp)
def _get_current_rewrite_config(self, interface):
current_interface_qos_config = self._current_config.get(interface)
current_rewrite_pcp = current_interface_qos_config.get('rewrite_pcp')
current_rewrite_dscp = current_interface_qos_config.get('rewrite_dscp')
if_type = current_interface_qos_config.get("if_type")
if_id = current_interface_qos_config.get("if_id")
current_trust = current_interface_qos_config.get('trust')
return current_rewrite_pcp, current_rewrite_dscp, current_trust, if_type, if_id
def _add_interface_trust_cmds(self, if_type, if_id, interface, trust, current_trust):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if trust == "L3" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "L2" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
elif trust == "both" and trust != current_trust:
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_DSCP, current_rewrite_dscp)
self._add_no_rewrite_cmd(if_type, if_id, interface, self.REWRITE_PCP, current_rewrite_pcp)
self._commands.append(self.TRUST_CMD.format(if_type, if_id, trust))
def _add_interface_rewrite_cmds(self, if_type, if_id, interface, rewrite_pcp, rewrite_dscp):
current_rewrite_pcp, current_rewrite_dscp, ignored1, ignored2, ignored3 = self._get_current_rewrite_config(
interface)
if rewrite_pcp == "enabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.REWRITE_PCP_CMD.format(if_type, if_id))
elif rewrite_pcp == "disabled" and rewrite_pcp != current_rewrite_pcp:
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
if rewrite_dscp == "enabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.REWRITE_DSCP_CMD.format(if_type, if_id))
elif rewrite_dscp == "disabled" and rewrite_dscp != current_rewrite_dscp:
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
def _add_no_rewrite_cmd(self, if_type, if_id, interface, rewrite_type, current_rewrite):
if rewrite_type == self.REWRITE_PCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_PCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_pcp"] = "disabled"
elif rewrite_type == self.REWRITE_DSCP and current_rewrite == "enabled":
self._commands.append(self.NO_REWRITE_DSCP_CMD.format(if_type, if_id))
self._current_config[interface]["rewrite_dscp"] = "disabled"
def main():
""" main entry point for module execution
"""
OnyxQosModule.main()
if __name__ == '__main__':
main()
|
ceache/treadmill | refs/heads/master | lib/python/treadmill/services/_linux_base_service.py | 2 | """Linux base service implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import contextlib
import errno
import functools
import logging
import os
import select
import socket
import struct
import time
import six
from treadmill import dirwatch
from treadmill import fs
from treadmill import yamlwrapper as yaml
from treadmill.syscall import eventfd
from . import _base_service
_LOGGER = logging.getLogger(__name__)
#: Name of service status file
_STATUS_SOCK = 'status.sock'
class LinuxResourceService(_base_service.ResourceService):
"""Linux server class for all Treadmill services.
/service_dir/resources/<containerid>-<uid>/ ->
/apps/<containerid>/rsrc/req-<svc_name>/
/apps/<container>/rsrc/<svc_name>/
request.yml
reply.yml
svc_req_id
"""
__slots__ = (
'_io_eventfd',
)
_IO_EVENT_PENDING = struct.pack('@Q', 1)
def __init__(self, service_dir, impl):
super(LinuxResourceService, self).__init__(service_dir, impl)
self._io_eventfd = None
@property
def status_sock(self):
"""status socket of the service.
"""
return os.path.join(self._dir, _STATUS_SOCK)
def status(self, timeout=30):
"""Query the status of the resource service.
:param ``float`` timeout:
Wait at least timeout seconds for the service to reply.
:raises ``ResourceServiceTimeoutError``:
If the requested service does not come up before timeout.
:raises ``socket.error``:
If there is a communication error with the service.
"""
backoff = 0
while backoff <= (timeout / 2):
with contextlib.closing(socket.socket(socket.AF_UNIX,
type=socket.SOCK_STREAM,
proto=0)) as status_socket:
try:
status_socket.connect(self.status_sock)
status = yaml.load(stream=status_socket.makefile('r'))
except socket.error as err:
if err.errno in (errno.ECONNREFUSED, errno.ENOENT):
status = None
else:
raise
if status is not None:
break
_LOGGER.info('Waiting for service %r to become available',
self.name)
# Implement a backoff mechanism
backoff += (backoff or 1)
time.sleep(backoff)
else:
raise _base_service.ResourceServiceTimeoutError(
'Service %r timed out' % (self.name),
)
return status
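    # Illustrative usage sketch (hypothetical service directory and impl; names
    # are examples, not part of the original module):
    #
    #   svc = LinuxResourceService('/var/tmp/treadmill/network_svc', impl)
    #   info = svc.status(timeout=30)   # polls status.sock with backoff
    #   print(info)                     # dict produced by impl.report_status()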
def _run(self, impl, watchdog_lease):
"""Linux implementation of run.
"""
# Run initialization
impl.initialize(self._dir)
# Create the status socket
ss = self._create_status_socket()
watcher = dirwatch.DirWatcher(self._rsrc_dir)
# Call all the callbacks with the implementation instance
watcher.on_created = functools.partial(self._on_created, impl)
watcher.on_deleted = functools.partial(self._on_deleted, impl)
# NOTE: A modified request is treated as a brand new request
watcher.on_modified = functools.partial(self._on_created, impl)
self._io_eventfd = eventfd.eventfd(0, eventfd.EFD_CLOEXEC)
# Before starting, check the request directory
svcs = self._check_requests()
# and "fake" a created event on all the existing requests
for existing_svcs in svcs:
self._on_created(impl, existing_svcs)
# Before starting, make sure backend state and service state are
# synchronized.
impl.synchronize()
# Report service status
status_info = {}
status_info.update(impl.report_status())
# Setup the poll object
loop_poll = select.poll()
loop_callbacks = {}
base_event_handlers = [
(
self._io_eventfd,
select.POLLIN,
functools.partial(
self._handle_queued_io_events,
watcher=watcher,
impl=impl,
)
),
(
watcher.inotify,
select.POLLIN,
functools.partial(
self._handle_io_events,
watcher=watcher,
impl=impl,
)
),
(
ss,
select.POLLIN,
functools.partial(
self._publish_status,
status_socket=ss,
status_info=status_info,
)
),
]
# Initial collection of implementation' event handlers
impl_event_handlers = impl.event_handlers()
self._update_poll_registration(
loop_poll,
loop_callbacks,
base_event_handlers + impl_event_handlers,
)
loop_timeout = impl.WATCHDOG_HEARTBEAT_SEC // 2
while not self._is_dead:
# Check for events
updated = self._run_events(
loop_poll,
loop_timeout,
loop_callbacks,
)
if updated:
# Report service status
status_info.clear()
status_info.update(impl.report_status())
# Update poll registration if needed
impl_event_handlers = impl.event_handlers()
self._update_poll_registration(
loop_poll, loop_callbacks,
base_event_handlers + impl_event_handlers,
)
# Clean up stale requests
self._check_requests()
# Heartbeat
watchdog_lease.heartbeat()
def _publish_status(self, status_socket, status_info):
"""Publish service status on the incomming connection on socket
"""
with contextlib.closing(status_socket.accept()[0]) as clt:
clt_stream = clt.makefile(mode='w')
try:
yaml.dump(status_info,
explicit_start=True, explicit_end=True,
default_flow_style=False,
stream=clt_stream)
clt_stream.flush()
except socket.error as err:
if err.errno == errno.EPIPE:
pass
else:
raise
@staticmethod
def _run_events(loop_poll, loop_timeout, loop_callbacks):
"""Wait for events up to `loop_timeout` and execute each of the
registered handlers.
:returns ``bool``:
True is any of the callbacks returned True
"""
pending_callbacks = []
try:
# poll timeout is in milliseconds
for (fd, _event) in loop_poll.poll(loop_timeout * 1000):
fd_data = loop_callbacks[fd]
_LOGGER.debug('Event on %r: %r', fd, fd_data)
pending_callbacks.append(
fd_data['callback']
)
except select.error as err:
# Ignore signal interruptions
if six.PY2:
# pylint: disable=W1624,E1136,indexing-exception
if err[0] != errno.EINTR:
raise
else:
if err.errno != errno.EINTR:
raise
results = [
callback()
for callback in pending_callbacks
]
return any(results)
@staticmethod
def _update_poll_registration(poll, poll_callbacks, handlers):
"""Setup the poll object and callbacks based on handlers.
"""
def _normalize_fd(filedescriptor):
"""Return the fd number or filedescriptor.
"""
if isinstance(filedescriptor, int):
# Already a fd number. Use that.
fd = filedescriptor
else:
fd = filedescriptor.fileno()
return fd
handlers = [
(_normalize_fd(fd), events, callback)
for (fd, events, callback) in handlers
]
for (fd, events, callback) in handlers:
fd_data = {'callback': callback, 'events': events}
if fd not in poll_callbacks:
poll.register(fd, events)
poll_callbacks[fd] = fd_data
_LOGGER.debug('Registered %r: %r', fd, fd_data)
elif poll_callbacks[fd] != fd_data:
poll.modify(fd, events)
poll_callbacks[fd] = fd_data
_LOGGER.debug('Updated %r: %r', fd, fd_data)
all_fds = set(handler[0] for handler in handlers)
for fd in list(poll_callbacks.keys()):
if fd not in all_fds:
_LOGGER.debug('Unregistered %r: %r', fd, poll_callbacks[fd])
poll.unregister(fd)
del poll_callbacks[fd]
def clt_update_request(self, req_id):
"""Update an existing request.
This should only be called by the client instance.
"""
_update_request(self._rsrc_dir, req_id)
def _create_status_socket(self):
"""Create a listening socket to process status requests.
"""
fs.rm_safe(self.status_sock)
status_socket = socket.socket(
family=socket.AF_UNIX,
type=socket.SOCK_STREAM,
proto=0
)
status_socket.bind(self.status_sock)
os.chmod(self.status_sock, 0o666)
status_socket.listen(5)
return status_socket
def _handle_queued_io_events(self, watcher, impl):
"""Process queued IO events.
        Base service IO event handler (dispatches to on_created/on_deleted).
:returns ``bool``:
``True`` if any of the event handlers returns ``True``.
"""
        # Always start by clearing the IO event fd. We will reset it below if
        # needed (an eventfd read always returns 8 bytes).
os.read(self._io_eventfd, 8)
return self._handle_io_events(watcher=watcher, impl=impl, resume=True)
def _handle_io_events(self, watcher, impl, resume=False):
"""Process IO events.
        Base service IO event handler (dispatches to on_created/on_deleted).
:returns ``bool``:
``True`` if any of the event handlers returns ``True``.
"""
io_res = watcher.process_events(
max_events=impl.MAX_REQUEST_PER_CYCLE,
resume=resume
)
# Check if there were more events to process
if io_res and io_res[-1][0] == dirwatch.DirWatcherEvent.MORE_PENDING:
_LOGGER.debug('More requests events pending')
os.write(self._io_eventfd, self._IO_EVENT_PENDING)
return any(
[
callback_res
for (_, _, callback_res) in
io_res
]
)
class LinuxBaseResourceServiceImpl(_base_service.BaseResourceServiceImpl):
"""Base interface of Resource Service implementations.
"""
__slots__ = ()
@abc.abstractmethod
def report_status(self):
"""Record service status information.
Will be called at least once after initialization is complete.
"""
return {}
def event_handlers(self):
"""Returns a list of `(fileno, event, callback)` to be registered in
the event loop.
"""
return []
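    # Illustrative shape of the handler tuples a subclass may return (example
    # only; `my_sock` and `_on_readable` are hypothetical names):
    #
    #   def event_handlers(self):
    #       return [
    #           (my_sock, select.POLLIN, self._on_readable),
    #       ]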
def retry_request(self, rsrc_id):
"""Force re-evaluation of a request.
"""
_update_request(self._service_rsrc_dir, rsrc_id)
def _update_request(rsrc_dir, req_id):
"""Update an existing request.
This should only be called by the client instance.
"""
svc_req_lnk = os.path.join(rsrc_dir, req_id)
_LOGGER.debug('Updating %r: %r', req_id, svc_req_lnk)
# Remove any reply if it exists
fs.rm_safe(os.path.join(svc_req_lnk, _base_service.REP_FILE))
# NOTE: This does the equivalent of a touch on the symlink
try:
os.lchown(
svc_req_lnk,
os.getuid(),
os.getgid()
)
except OSError as err:
if err.errno != errno.ENOENT:
raise
|
bwrsandman/OpenUpgrade | refs/heads/8.0 | addons/purchase_analytic_plans/purchase_analytic_plans.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class purchase_order_line(osv.osv):
_name='purchase.order.line'
_inherit='purchase.order.line'
_columns = {
'analytics_id':fields.many2one('account.analytic.plan.instance','Analytic Distribution'),
}
class purchase_order(osv.osv):
_name='purchase.order'
_inherit='purchase.order'
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
res = super(purchase_order, self)._prepare_inv_line(cr, uid, account_id, order_line, context=context)
res['analytics_id'] = order_line.analytics_id.id
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
antoinecarme/pyaf | refs/heads/master | tests/artificial/transf_RelativeDifference/trend_MovingAverage/cycle_30/ar_/test_artificial_1024_RelativeDifference_MovingAverage_30__0.py | 1 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 0); |
catapult-project/catapult | refs/heads/master | trace_processor/trace_uploader/cloud_config.py | 8 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import logging
from google.appengine.api import app_identity
from google.appengine.ext import ndb
def _is_devserver():
server_software = os.environ.get('SERVER_SOFTWARE', '')
return server_software and server_software.startswith('Development')
_DEFAULT_CATAPULT_PATH = '/catapult'
_DEFAULT_TARGET = 'prod'
if _is_devserver():
_DEFAULT_TARGET = 'test'
_CONFIG_KEY_NAME = 'pi_cloud_mapper_config_%s' % _DEFAULT_TARGET
_DEFAULT_CONTROL_BUCKET_PATH = 'gs://%s/%s' % (
app_identity.get_default_gcs_bucket_name(), _DEFAULT_TARGET)
_DEFAULT_SOURCE_DISK_IMAGE = ('https://www.googleapis.com/compute/v1/projects/'
'debian-cloud/global/images/debian-8-jessie-v20151104')
_GCE_DEFAULT_ZONE = 'us-central1-f'
_GCE_DEFAULT_MACHINE_TYPE = 'n1-standard-1'
class CloudConfig(ndb.Model):
control_bucket_path = ndb.StringProperty(default=_DEFAULT_CONTROL_BUCKET_PATH)
setup_scheme = 'http' if _is_devserver() else 'https'
default_corpus = ndb.StringProperty(
default='%s://%s' % (
setup_scheme, app_identity.get_default_version_hostname()))
urlfetch_service_id = ndb.StringProperty(default='')
gce_project_name = ndb.StringProperty(
default=app_identity.get_application_id())
gce_source_disk_image = ndb.StringProperty(default=_DEFAULT_SOURCE_DISK_IMAGE)
gce_zone = ndb.StringProperty(default=_GCE_DEFAULT_ZONE)
gce_machine_type = ndb.StringProperty(default=_GCE_DEFAULT_MACHINE_TYPE)
trace_upload_bucket = ndb.StringProperty(
default='%s/traces' % app_identity.get_default_gcs_bucket_name())
catapult_path = ndb.StringProperty(default=_DEFAULT_CATAPULT_PATH)
def Get():
config = CloudConfig.get_by_id(_CONFIG_KEY_NAME)
if not config:
    logging.warning('CloudConfig not found, creating a default one.')
config = CloudConfig(id=_CONFIG_KEY_NAME)
if 'GCS_BUCKET_NAME' in os.environ:
config.trace_upload_bucket = os.environ['GCS_BUCKET_NAME']
config.put()
return config
|
spreeker/democracygame | refs/heads/master | democracy/profiles/models.py | 1 | from django.db import models
from django.contrib.auth.models import User
from gamelogic.models import roles
from gamelogic.models import human_roles
from django.db.models.signals import post_save
class UserProfile(models.Model):
user = models.OneToOneField(User)
score = models.IntegerField(default = 0 )
# game activity
total_for = models.IntegerField(default = 0)
total_against = models.IntegerField(default = 0)
total_blank = models.IntegerField(default = 0)
role = models.CharField(max_length = 30, choices=roles.items() )
# description
title = models.CharField(max_length = 100, blank = True)
description = models.TextField(blank = True)
url = models.URLField(verify_exists = False, blank = True)
#privacy
votes_public = models.BooleanField(default = False)
id_is_verified = models.BooleanField(default = False)
show_identity = models.BooleanField(default = False)
def ranking(self):
return UserProfile.objects.filter(
role__in=human_roles.keys(),
            score__gt=self.score).count() + 1
def create_userprofile(sender, **kwargs):
"""
When a User model instance is saved this function is called to create
a UserProfile instance if none exists already. (This function listens for
post_save signals coming from the User model.)
If you create a user anywhere , in the admin or
official registration way , this code will make sure there is a userprofile.
"""
new_user = kwargs['instance']
if kwargs["created"]:
new_profile = UserProfile(user=new_user, score=0, role='citizen')
new_profile.save()
post_save.connect(create_userprofile, sender=User, dispatch_uid="users-profilecreation-signal")
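# Illustrative effect of the signal hook above (example only; assumes the default
# reverse accessor for the OneToOneField):
#
#   user = User.objects.create_user('alice', 'alice@example.com', 'secret')
#   user.userprofile.role      # 'citizen' -- profile created automatically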
|
aleonliao/depot_tools | refs/heads/master | third_party/boto/gs/bucket.py | 51 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import urllib
import xml.sax
import boto
from boto import handler
from boto.resultset import ResultSet
from boto.exception import InvalidAclError
from boto.gs.acl import ACL, CannedACLStrings
from boto.gs.acl import SupportedPermissions as GSPermissions
from boto.gs.bucketlistresultset import VersionedBucketListResultSet
from boto.gs.cors import Cors
from boto.gs.key import Key as GSKey
from boto.s3.acl import Policy
from boto.s3.bucket import Bucket as S3Bucket
# constants for http query args
DEF_OBJ_ACL = 'defaultObjectAcl'
STANDARD_ACL = 'acl'
CORS_ARG = 'cors'
class Bucket(S3Bucket):
"""Represents a Google Cloud Storage bucket."""
VersioningBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<VersioningConfiguration><Status>%s</Status>'
'</VersioningConfiguration>')
WebsiteBody = ('<?xml version="1.0" encoding="UTF-8"?>\n'
'<WebsiteConfiguration>%s%s</WebsiteConfiguration>')
WebsiteMainPageFragment = '<MainPageSuffix>%s</MainPageSuffix>'
WebsiteErrorFragment = '<NotFoundPage>%s</NotFoundPage>'
def __init__(self, connection=None, name=None, key_class=GSKey):
super(Bucket, self).__init__(connection, name, key_class)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'CreationDate':
self.creation_date = value
else:
setattr(self, name, value)
def get_key(self, key_name, headers=None, version_id=None,
response_headers=None, generation=None):
"""Returns a Key instance for an object in this bucket.
Note that this method uses a HEAD request to check for the existence of
the key.
:type key_name: string
:param key_name: The name of the key to retrieve
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/06N3b for details.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: A specific generation number to fetch the key at. If
not specified, the latest generation is fetched.
:rtype: :class:`boto.gs.key.Key`
:returns: A Key object from this bucket.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
if response_headers:
for rk, rv in response_headers.iteritems():
query_args_l.append('%s=%s' % (rk, urllib.quote(rv)))
key, resp = self._get_key_internal(key_name, headers,
query_args_l=query_args_l)
return key
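    # Illustrative usage (assumes valid Google Cloud Storage credentials and an
    # existing bucket/object; names below are examples):
    #
    #   import boto
    #   conn = boto.connect_gs()
    #   bucket = conn.get_bucket('example-bucket')
    #   key = bucket.get_key('path/to/object.txt', generation=3)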
def copy_key(self, new_key_name, src_bucket_name, src_key_name,
metadata=None, src_version_id=None, storage_class='STANDARD',
preserve_acl=False, encrypt_key=False, headers=None,
query_args=None, src_generation=None):
"""Create a new key in the bucket by copying an existing key.
:type new_key_name: string
:param new_key_name: The name of the new key
:type src_bucket_name: string
:param src_bucket_name: The name of the source bucket
:type src_key_name: string
:param src_key_name: The name of the source key
:type src_generation: int
:param src_generation: The generation number of the source key to copy.
If not specified, the latest generation is copied.
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type version_id: string
:param version_id: Unused in this subclass.
:type storage_class: string
:param storage_class: The storage class of the new key. By
default, the new key will use the standard storage class.
Possible values are: STANDARD | DURABLE_REDUCED_AVAILABILITY
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to GCS, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL (or if you have a default ACL set
on the bucket), a value of False will be significantly more
efficient.
:type encrypt_key: bool
:param encrypt_key: Included for compatibility with S3. This argument is
ignored.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type query_args: string
:param query_args: A string of additional querystring arguments
to append to the request
:rtype: :class:`boto.gs.key.Key`
:returns: An instance of the newly created key object
"""
if src_generation:
headers = headers or {}
headers['x-goog-copy-source-generation'] = str(src_generation)
return super(Bucket, self).copy_key(
new_key_name, src_bucket_name, src_key_name, metadata=metadata,
storage_class=storage_class, preserve_acl=preserve_acl,
encrypt_key=encrypt_key, headers=headers, query_args=query_args)
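    # Illustrative usage (example names; copies a specific source generation and
    # keeps the source object's ACL at the cost of two extra API calls):
    #
    #   bucket.copy_key('backup/object.txt', 'example-src-bucket', 'object.txt',
    #                   src_generation=5, preserve_acl=True)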
def list_versions(self, prefix='', delimiter='', marker='',
generation_marker='', headers=None):
"""
List versioned objects within a bucket. This returns an
instance of an VersionedBucketListResultSet that automatically
handles all of the result paging, etc. from GCS. You just need
to keep iterating until there are no more results. Called
with no arguments, this will return an iterator object across
all keys within the bucket.
:type prefix: string
:param prefix: allows you to limit the listing to a particular
prefix. For example, if you call the method with
prefix='/foo/' then the iterator will only cycle through
the keys that begin with the string '/foo/'.
:type delimiter: string
:param delimiter: can be used in conjunction with the prefix
to allow you to organize and browse your keys
hierarchically. See:
https://developers.google.com/storage/docs/reference-headers#delimiter
for more details.
:type marker: string
:param marker: The "marker" of where you are in the result set
:type generation_marker: string
:param generation_marker: The "generation marker" of where you are in
the result set.
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:rtype:
:class:`boto.gs.bucketlistresultset.VersionedBucketListResultSet`
:return: an instance of a BucketListResultSet that handles paging, etc.
"""
return VersionedBucketListResultSet(self, prefix, delimiter,
marker, generation_marker,
headers)
def delete_key(self, key_name, headers=None, version_id=None,
mfa_token=None, generation=None):
"""
Deletes a key from the bucket.
:type key_name: string
:param key_name: The key name to delete
:type headers: dict
:param headers: A dictionary of header name/value pairs.
:type version_id: string
:param version_id: Unused in this subclass.
:type mfa_token: tuple or list of strings
:param mfa_token: Unused in this subclass.
:type generation: int
:param generation: The generation number of the key to delete. If not
specified, the latest generation number will be deleted.
:rtype: :class:`boto.gs.key.Key`
:returns: A key object holding information on what was
deleted.
"""
query_args_l = []
if generation:
query_args_l.append('generation=%s' % generation)
self._delete_key_internal(key_name, headers=headers,
version_id=version_id, mfa_token=mfa_token,
query_args_l=query_args_l)
def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None,
generation=None, if_generation=None, if_metageneration=None):
"""Sets or changes a bucket's or key's ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
else:
self.set_canned_acl(acl_or_str, key_name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_def_acl(self, acl_or_str, headers=None):
"""Sets or changes a bucket's default ACL.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if isinstance(acl_or_str, Policy):
raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
elif isinstance(acl_or_str, ACL):
self.set_def_xml_acl(acl_or_str.to_xml(), headers=headers)
else:
self.set_def_canned_acl(acl_or_str, headers=headers)
def _get_xml_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_xml_acl and _get_acl_helper."""
response = self.connection.make_request('GET', self.name, key_name,
query_args=query_args,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
return body
def _get_acl_helper(self, key_name, headers, query_args):
"""Provides common functionality for get_acl and get_def_acl."""
body = self._get_xml_acl_helper(key_name, headers, query_args)
acl = ACL(self)
h = handler.XmlHandler(acl, self)
xml.sax.parseString(body, h)
return acl
def get_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: :class:`.gs.acl.ACL`
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_acl_helper(key_name, headers, query_args)
def get_xml_acl(self, key_name='', headers=None, version_id=None,
generation=None):
"""Returns the ACL string of the bucket or an object in the bucket.
:param str key_name: The name of the object to get the ACL for. If not
specified, the ACL for the bucket will be returned.
:param dict headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned. This parameter is only valid when retrieving
the ACL of an object, not a bucket.
:rtype: str
"""
query_args = STANDARD_ACL
if generation:
query_args += '&generation=%s' % generation
return self._get_xml_acl_helper(key_name, headers, query_args)
def get_def_acl(self, headers=None):
"""Returns the bucket's default ACL.
:param dict headers: Additional headers to set during the request.
:rtype: :class:`.gs.acl.ACL`
"""
return self._get_acl_helper('', headers, DEF_OBJ_ACL)
def _set_acl_helper(self, acl_or_str, key_name, headers, query_args,
generation, if_generation, if_metageneration,
canned=False):
"""Provides common functionality for set_acl, set_xml_acl,
set_canned_acl, set_def_acl, set_def_xml_acl, and
set_def_canned_acl()."""
headers = headers or {}
data = ''
if canned:
headers[self.connection.provider.acl_header] = acl_or_str
else:
data = acl_or_str.encode('UTF-8')
if generation:
query_args += '&generation=%s' % generation
if if_metageneration is not None and if_generation is None:
raise ValueError("Received if_metageneration argument with no "
"if_generation argument. A meta-generation has no "
"meaning without a content generation.")
if not key_name and (if_generation or if_metageneration):
raise ValueError("Received if_generation or if_metageneration "
"parameter while setting the ACL of a bucket.")
if if_generation is not None:
headers['x-goog-if-generation-match'] = str(if_generation)
if if_metageneration is not None:
headers['x-goog-if-metageneration-match'] = str(if_metageneration)
response = self.connection.make_request('PUT', self.name, key_name,
data=data, headers=headers, query_args=query_args)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None,
query_args='acl', generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type query_args: str
:param query_args: The query parameters to pass with the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
return self._set_acl_helper(acl_str, key_name=key_name, headers=headers,
query_args=query_args,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def set_canned_acl(self, acl_str, key_name='', headers=None,
version_id=None, generation=None, if_generation=None,
if_metageneration=None):
"""Sets a bucket's or objects's ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type key_name: string
:param key_name: A key name within the bucket to set the ACL for. If not
specified, the ACL for the bucket will be set.
:type headers: dict
:param headers: Additional headers to set during the request.
:type version_id: string
:param version_id: Unused in this subclass.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = STANDARD_ACL
return self._set_acl_helper(acl_str, key_name, headers, query_args,
generation, if_generation,
if_metageneration, canned=True)
def set_def_canned_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL using a predefined (canned) value.
:type acl_str: string
:param acl_str: A canned ACL string. See
:data:`~.gs.acl.CannedACLStrings`.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
if acl_str not in CannedACLStrings:
raise ValueError("Provided canned ACL string (%s) is not valid."
% acl_str)
query_args = DEF_OBJ_ACL
return self._set_acl_helper(acl_str, '', headers, query_args,
generation=None, if_generation=None,
if_metageneration=None, canned=True)
def set_def_xml_acl(self, acl_str, headers=None):
"""Sets a bucket's default ACL to an XML string.
:type acl_str: string
:param acl_str: A string containing the ACL XML.
:type headers: dict
:param headers: Additional headers to set during the request.
"""
return self.set_xml_acl(acl_str, '', headers,
query_args=DEF_OBJ_ACL)
def get_cors(self, headers=None):
"""Returns a bucket's CORS XML document.
:param dict headers: Additional headers to send with the request.
:rtype: :class:`~.cors.Cors`
"""
response = self.connection.make_request('GET', self.name,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status == 200:
# Success - parse XML and return Cors object.
cors = Cors()
h = handler.XmlHandler(cors, self)
xml.sax.parseString(body, h)
return cors
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def set_cors(self, cors, headers=None):
"""Sets a bucket's CORS XML document.
:param str cors: A string containing the CORS XML.
:param dict headers: Additional headers to send with the request.
"""
cors_xml = cors.encode('UTF-8')
response = self.connection.make_request('PUT', self.name,
data=cors_xml,
query_args=CORS_ARG,
headers=headers)
body = response.read()
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
def get_storage_class(self):
"""
Returns the StorageClass for the bucket.
:rtype: str
:return: The StorageClass for the bucket.
"""
response = self.connection.make_request('GET', self.name,
query_args='storageClass')
body = response.read()
if response.status == 200:
rs = ResultSet(self)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs.StorageClass
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
# Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
# to allow polymorphic treatment at application layer.
def add_email_grant(self, permission, email_address,
recursive=False, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the GS
            account you are granting the permission to.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_email_grant(permission, email_address, headers=headers)
# Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
# to allow polymorphic treatment at application layer.
def add_user_grant(self, permission, user_id, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add a canonical user
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUTs the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ|WRITE|FULL_CONTROL)
:type user_id: string
:param user_id: The canonical user id associated with the GS account
you are granting the permission to.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_user_grant(permission, user_id)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_user_grant(permission, user_id, headers=headers)
def add_group_email_grant(self, permission, email_address, recursive=False,
headers=None):
"""
Convenience method that provides a quick way to add an email group
grant to a bucket. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GCS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|WRITE|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
Group to which you are granting the permission.
:type recursive: bool
        :param recursive: A boolean value that controls whether the call
will apply the grant to all keys within the bucket
or not. The default value is False. By passing a
True value, the call will iterate through all keys
in the bucket and apply the same grant to each key.
CAUTION: If you have a lot of keys, this could take
a long time!
"""
if permission not in GSPermissions:
raise self.connection.provider.storage_permissions_error(
'Unknown Permission: %s' % permission)
acl = self.get_acl(headers=headers)
acl.add_group_email_grant(permission, email_address)
self.set_acl(acl, headers=headers)
if recursive:
for key in self:
key.add_group_email_grant(permission, email_address,
headers=headers)
# Method with same input signature as boto.s3.bucket.Bucket.list_grants()
# (but returning different object type), to allow polymorphic treatment
# at application layer.
def list_grants(self, headers=None):
"""Returns the ACL entries applied to this bucket.
:param dict headers: Additional headers to send with the request.
:rtype: list containing :class:`~.gs.acl.Entry` objects.
"""
acl = self.get_acl(headers=headers)
return acl.entries
def disable_logging(self, headers=None):
"""Disable logging on this bucket.
:param dict headers: Additional headers to send with the request.
"""
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging/>'
self.set_subresource('logging', xml_str, headers=headers)
def enable_logging(self, target_bucket, target_prefix=None, headers=None):
"""Enable logging on a bucket.
:type target_bucket: bucket or string
:param target_bucket: The bucket to log to.
:type target_prefix: string
:param target_prefix: The prefix which should be prepended to the
generated log files written to the target_bucket.
:param dict headers: Additional headers to send with the request.
"""
if isinstance(target_bucket, Bucket):
target_bucket = target_bucket.name
xml_str = '<?xml version="1.0" encoding="UTF-8"?><Logging>'
xml_str = (xml_str + '<LogBucket>%s</LogBucket>' % target_bucket)
if target_prefix:
xml_str = (xml_str +
'<LogObjectPrefix>%s</LogObjectPrefix>' % target_prefix)
xml_str = xml_str + '</Logging>'
self.set_subresource('logging', xml_str, headers=headers)
def configure_website(self, main_page_suffix=None, error_key=None,
headers=None):
"""Configure this bucket to act as a website
:type main_page_suffix: str
:param main_page_suffix: Suffix that is appended to a request that is
for a "directory" on the website endpoint (e.g. if the suffix is
index.html and you make a request to samplebucket/images/ the data
that is returned will be for the object with the key name
images/index.html). The suffix must not be empty and must not
include a slash character. This parameter is optional and the
property is disabled if excluded.
:type error_key: str
        :param error_key: The object key name to use when a 404 error occurs.
This parameter is optional and the property is disabled if excluded.
:param dict headers: Additional headers to send with the request.
"""
if main_page_suffix:
main_page_frag = self.WebsiteMainPageFragment % main_page_suffix
else:
main_page_frag = ''
if error_key:
error_frag = self.WebsiteErrorFragment % error_key
else:
error_frag = ''
body = self.WebsiteBody % (main_page_frag, error_frag)
response = self.connection.make_request('PUT', self.name, data=body,
query_args='websiteConfig',
headers=headers)
body = response.read()
if response.status == 200:
return True
else:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
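    # Illustrative usage (example values; serves objects under the bucket's
    # website endpoint with index and not-found pages):
    #
    #   bucket.configure_website(main_page_suffix='index.html',
    #                            error_key='404.html')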
def get_website_configuration(self, headers=None):
"""Returns the current status of website configuration on the bucket.
:param dict headers: Additional headers to send with the request.
:rtype: dict
:returns: A dictionary containing a Python representation
of the XML response from GCS. The overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that
is for a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404.
"""
        return self.get_website_configuration_with_xml(headers)[0]
def get_website_configuration_with_xml(self, headers=None):
"""Returns the current status of website configuration on the bucket as
unparsed XML.
:param dict headers: Additional headers to send with the request.
:rtype: 2-Tuple
:returns: 2-tuple containing:
1) A dictionary containing a Python representation of the XML
response from GCS. The overall structure is:
* WebsiteConfiguration
* MainPageSuffix: suffix that is appended to request that is for
a "directory" on the website endpoint.
* NotFoundPage: name of an object to serve when site visitors
encounter a 404
2) Unparsed XML describing the bucket's website configuration.
"""
response = self.connection.make_request('GET', self.name,
query_args='websiteConfig', headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e, body
def delete_website_configuration(self, headers=None):
"""Remove the website configuration from this bucket.
:param dict headers: Additional headers to send with the request.
"""
self.configure_website(headers=headers)
def get_versioning_status(self, headers=None):
"""Returns the current status of versioning configuration on the bucket.
:rtype: bool
"""
response = self.connection.make_request('GET', self.name,
query_args='versioning',
headers=headers)
body = response.read()
boto.log.debug(body)
if response.status != 200:
raise self.connection.provider.storage_response_error(
response.status, response.reason, body)
resp_json = boto.jsonresponse.Element()
boto.jsonresponse.XmlHandler(resp_json, None).parse(body)
resp_json = resp_json['VersioningConfiguration']
return ('Status' in resp_json) and (resp_json['Status'] == 'Enabled')
def configure_versioning(self, enabled, headers=None):
"""Configure versioning for this bucket.
:param bool enabled: If set to True, enables versioning on this bucket.
If set to False, disables versioning.
:param dict headers: Additional headers to send with the request.
"""
if enabled == True:
req_body = self.VersioningBody % ('Enabled')
else:
req_body = self.VersioningBody % ('Suspended')
self.set_subresource('versioning', req_body, headers=headers)
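    # Illustrative usage (example only):
    #
    #   bucket.configure_versioning(True)       # enable object versioning
    #   bucket.get_versioning_status()          # now returns True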
|
gonboy/sl4a | refs/heads/master | python/src/Lib/CGIHTTPServer.py | 59 | """CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script,
and return a boolean.
This function sets self.cgi_info to a tuple (dir, rest)
when it returns True, where dir is the directory part before
the CGI script name. Note that rest begins with a
slash if it is not empty.
The default implementation tests whether the path
begins with one of the strings in the list
self.cgi_directories (and the next character is a '/'
or the end of the string).
"""
path = self.path
for x in self.cgi_directories:
i = len(x)
if path[:i] == x and (not path[i:] or path[i] == '/'):
self.cgi_info = path[:i], path[i+1:]
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
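    # Example of the resulting split, for a hypothetical request path:
    #   self.path == '/cgi-bin/hello.py/extra?x=1'  ->  is_cgi() returns True and
    #   self.cgi_info == ('/cgi-bin', 'hello.py/extra?x=1')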
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = {}
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
os.environ.update(env)
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, os.environ)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
elif self.have_popen2 or self.have_popen3:
# Windows -- use popen2 or popen3 to create a subprocess
import shutil
if self.have_popen3:
popenx = os.popen3
else:
popenx = os.popen2
cmdline = scriptfile
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = "%s -u %s" % (interp, cmdline)
if '=' not in query and '"' not in query:
cmdline = '%s "%s"' % (cmdline, query)
self.log_message("command: %s", cmdline)
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
files = popenx(cmdline, 'b')
fi = files[0]
fo = files[1]
if self.have_popen3:
fe = files[2]
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
fi.write(data)
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
fi.close()
shutil.copyfileobj(fo, self.wfile)
if self.have_popen3:
errors = fe.read()
fe.close()
if errors:
self.log_error('%s', errors)
sts = fo.close()
if sts:
self.log_error("CGI script exit status %#x", sts)
else:
self.log_message("CGI script exited OK")
else:
# Other O.S. -- execute script in this process
save_argv = sys.argv
save_stdin = sys.stdin
save_stdout = sys.stdout
save_stderr = sys.stderr
try:
save_cwd = os.getcwd()
try:
sys.argv = [scriptfile]
if '=' not in decoded_query:
sys.argv.append(decoded_query)
sys.stdout = self.wfile
sys.stdin = self.rfile
execfile(scriptfile, {"__name__": "__main__"})
finally:
sys.argv = save_argv
sys.stdin = save_stdin
sys.stdout = save_stdout
sys.stderr = save_stderr
os.chdir(save_cwd)
except SystemExit, sts:
self.log_error("CGI script exit status %s", str(sts))
else:
self.log_message("CGI script exited OK")
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
|
jbaginski/androguard | refs/heads/master | androguard/decompiler/dad/ast.py | 20 | # This file is part of Androguard.
#
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This file is a simplified version of writer.py that outputs an AST instead of source code.'''
import struct
from androguard.decompiler.dad import basic_blocks, instruction, opcode_ins
def array_access(arr, ind): return ['ArrayAccess', [arr, ind]]
def array_creation(tn, params, dim): return ['ArrayCreation', [tn] + params, dim]
def array_initializer(params, tn=None): return ['ArrayInitializer', params, tn]
def assignment(lhs, rhs, op=''): return ['Assignment', [lhs, rhs], op]
def binary_infix(op, left, right): return ['BinaryInfix', [left, right], op]
def cast(tn, arg): return ['Cast', [tn, arg]]
def field_access(triple, left): return ['FieldAccess', [left], triple]
def literal(result, tt): return ['Literal', result, tt]
def local(name): return ['Local', name]
def method_invocation(triple, name, base, params):
if base is None:
return ['MethodInvocation', params, triple, name, False]
return ['MethodInvocation', [base]+params, triple, name, True]
def parenthesis(expr): return ['Parenthesis', [expr]]
def typen(baset, dim): return ['TypeName', (baset, dim)]
def unary_prefix(op, left): return ['Unary', [left], op, False]
def unary_postfix(left, op): return ['Unary', [left], op, True]
def var_decl(typen, var): return [typen, var]
def dummy(*args): return ['Dummy', args]
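# For illustration (hypothetical names), the expression a[0] = b + 1 would be
# built from the helpers above roughly as:
#   assignment(array_access(local('a'), literal('0', ('.int', 0))),
#              binary_infix('+', local('b'), literal('1', ('.int', 0))))
# which evaluates to the nested-list AST
#   ['Assignment', [['ArrayAccess', [['Local', 'a'], ['Literal', '0', ('.int', 0)]]],
#                   ['BinaryInfix', [['Local', 'b'], ['Literal', '1', ('.int', 0)]], '+']], '']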
################################################################################
def expression_stmt(expr): return ['ExpressionStatement', expr]
def local_decl_stmt(expr, decl): return ['LocalDeclarationStatement', expr, decl]
def return_stmt(expr): return ['ReturnStatement', expr]
def throw_stmt(expr): return ['ThrowStatement', expr]
def jump_stmt(keyword): return ['JumpStatement', keyword, None]
def loop_stmt(isdo, cond_expr, body):
type_ = 'DoStatement' if isdo else 'WhileStatement'
return [type_, None, cond_expr, body]
def try_stmt(tryb, pairs): return ['TryStatement', None, tryb, pairs]
def if_stmt(cond_expr, scopes): return ['IfStatement', None, cond_expr, scopes]
def switch_stmt(cond_expr, ksv_pairs):
return ['SwitchStatement', None, cond_expr, ksv_pairs]
# Create empty statement block (statements to be appended later)
# Note, the code below assumes this can be modified in place
def statement_block(): return ['BlockStatement', None, []]
# Add a statement to the end of a statement block
def _append(sb, stmt):
assert(sb[0] == 'BlockStatement')
if stmt is not None:
sb[2].append(stmt)
################################################################################
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
def parse_descriptor(desc):
dim = 0
while desc and desc[0] == '[':
desc = desc[1:]
dim += 1
if desc in TYPE_DESCRIPTOR:
return typen('.'+TYPE_DESCRIPTOR[desc], dim)
if desc and desc[0] == 'L' and desc[-1] == ';':
return typen(desc[1:-1], dim)
# invalid descriptor (probably None)
return dummy(str(desc))
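# A few illustrative conversions (derived from the mapping above):
#   parse_descriptor('I')                   -> ['TypeName', ('.int', 0)]
#   parse_descriptor('[[D')                 -> ['TypeName', ('.double', 2)]
#   parse_descriptor('Ljava/lang/String;')  -> ['TypeName', ('java/lang/String', 0)]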
# Note: the literal_foo functions (and dummy) are also imported by decompile.py
def literal_string(s):
escapes = {
'\0':'\\0',
'\t':'\\t',
'\r':'\\r',
'\n':'\\n',
'"':'\\"',
'\\':'\\\\'
}
buf = ['"']
for c in s.decode('utf8'):
if c in escapes:
buf.append(escapes[c])
elif ' ' <= c < '\x7f':
buf.append(c)
else:
buf.append('\u{:04x}'.format(ord(c)))
buf.append('"')
return literal(''.join(buf), ('java/lang/String', 0))
def literal_class(desc):
return literal(parse_descriptor(desc), ('java/lang/Class', 0))
def literal_bool(b): return literal(str(b).lower(), ('.boolean', 0))
def literal_int(b): return literal(str(b), ('.int', 0))
def literal_hex_int(b): return literal(hex(b), ('.int', 0))
def literal_long(b): return literal(str(b)+'L', ('.long', 0))
def literal_float(f): return literal(str(f)+'f', ('.float', 0))
def literal_double(f): return literal(str(f), ('.double', 0))
def literal_null(): return literal('null', ('.null', 0))
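# Quick sanity examples for the literal helpers (assumed inputs):
#   literal_string('ok') -> ['Literal', '"ok"', ('java/lang/String', 0)]
#   literal_hex_int(255) -> ['Literal', '0xff', ('.int', 0)]
#   literal_long(7)      -> ['Literal', '7L', ('.long', 0)]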
def visit_decl(var, init_expr=None):
t = parse_descriptor(var.get_type())
v = local('v{}'.format(var.name))
return local_decl_stmt(init_expr, var_decl(t, v))
def visit_arr_data(value):
data = value.get_data()
tab = []
elem_size = value.element_width
if elem_size == 4:
for i in range(0, value.size * 4, 4):
tab.append(struct.unpack('<i', data[i:i + 4])[0])
else: # FIXME: other cases
for i in range(value.size):
tab.append(struct.unpack('<b', data[i])[0])
return array_initializer(map(literal_int, tab))
def write_inplace_if_possible(lhs, rhs):
if isinstance(rhs, instruction.BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
# post increment/decrement
if rhs.op in '+-' and isinstance(exp_rhs, instruction.Constant) and exp_rhs.get_int_value() == 1:
return unary_postfix(visit_expr(lhs), rhs.op * 2)
# compound assignment
return assignment(visit_expr(lhs), visit_expr(exp_rhs), op=rhs.op)
return assignment(visit_expr(lhs), visit_expr(rhs))
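# For example (illustrative variable names): an assignment v3 = v3 + 1 collapses
# to the postfix form v3++, v3 = v3 + v5 becomes the compound form v3 += v5, and
# anything else falls through to a plain assignment.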
def visit_expr(op):
if isinstance(op, instruction.ArrayLengthExpression):
expr = visit_expr(op.var_map[op.array])
return field_access([None, 'length', None], expr)
if isinstance(op, instruction.ArrayLoadExpression):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.idx])
return array_access(array_expr, index_expr)
if isinstance(op, instruction.ArrayStoreInstruction):
array_expr = visit_expr(op.var_map[op.array])
index_expr = visit_expr(op.var_map[op.index])
rhs = visit_expr(op.var_map[op.rhs])
return assignment(array_access(array_expr, index_expr), rhs)
if isinstance(op, instruction.AssignExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs
if lhs is None:
return visit_expr(rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.BaseClass):
if op.clsdesc is None:
assert(op.cls == "super")
return local(op.cls)
return parse_descriptor(op.clsdesc)
if isinstance(op, instruction.BinaryExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
expr = binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if not isinstance(op, instruction.BinaryCompExpression):
expr = parenthesis(expr)
return expr
if isinstance(op, instruction.CheckCastExpression):
lhs = op.var_map.get(op.arg)
return parenthesis(cast(parse_descriptor(op.clsdesc), visit_expr(lhs)))
if isinstance(op, instruction.ConditionalExpression):
lhs = op.var_map.get(op.arg1)
rhs = op.var_map.get(op.arg2)
return binary_infix(op.op, visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.ConditionalZExpression):
arg = op.var_map[op.arg]
if isinstance(arg, instruction.BinaryCompExpression):
arg.op = op.op
return visit_expr(arg)
expr = visit_expr(arg)
atype = arg.get_type()
if atype == 'Z':
if op.op == opcode_ins.Op.EQUAL:
expr = unary_prefix('!', expr)
elif atype in 'VBSCIJFD':
expr = binary_infix(op.op, expr, literal_int(0))
else:
expr = binary_infix(op.op, expr, literal_null())
return expr
if isinstance(op, instruction.Constant):
if op.type == 'Ljava/lang/String;':
return literal_string(op.cst)
elif op.type == 'Z':
return literal_bool(op.cst == 0)
elif op.type in 'ISCB':
return literal_int(op.cst2)
elif op.type in 'J':
return literal_long(op.cst2)
elif op.type in 'F':
return literal_float(op.cst)
elif op.type in 'D':
return literal_double(op.cst)
elif op.type == 'Ljava/lang/Class;':
return literal_class(op.clsdesc)
return dummy('???')
if isinstance(op, instruction.FillArrayExpression):
array_expr = visit_expr(op.var_map[op.reg])
rhs = visit_arr_data(op.value)
return assignment(array_expr, rhs)
if isinstance(op, instruction.FilledArrayExpression):
tn = parse_descriptor(op.type)
params = [visit_expr(op.var_map[x]) for x in op.args]
return array_initializer(params, tn)
if isinstance(op, instruction.InstanceExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
expr = visit_expr(op.var_map[op.arg])
return field_access(triple, expr)
if isinstance(op, instruction.InstanceInstruction):
triple = op.clsdesc[1:-1], op.name, op.atype
lhs = field_access(triple, visit_expr(op.var_map[op.lhs]))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.InvokeInstruction):
base = op.var_map[op.base]
params = [op.var_map[arg] for arg in op.args]
params = map(visit_expr, params)
if op.name == '<init>':
if isinstance(base, instruction.ThisParam):
return method_invocation(op.triple, 'this', None, params)
elif isinstance(base, instruction.NewInstance):
return ['ClassInstanceCreation', params, parse_descriptor(base.type)]
else:
assert(isinstance(base, instruction.Variable))
# fallthrough to create dummy <init> call
return method_invocation(op.triple, op.name, visit_expr(base), params)
# for unmatched monitor instructions, just create dummy expressions
if isinstance(op, instruction.MonitorEnterExpression):
return dummy("monitor enter(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MonitorExitExpression):
return dummy("monitor exit(", visit_expr(op.var_map[op.ref]), ")")
if isinstance(op, instruction.MoveExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return write_inplace_if_possible(lhs, rhs)
if isinstance(op, instruction.MoveResultExpression):
lhs = op.var_map.get(op.lhs)
rhs = op.var_map.get(op.rhs)
return assignment(visit_expr(lhs), visit_expr(rhs))
if isinstance(op, instruction.NewArrayExpression):
tn = parse_descriptor(op.type[1:])
expr = visit_expr(op.var_map[op.size])
return array_creation(tn, [expr], 1)
# create dummy expression for unmatched newinstance
if isinstance(op, instruction.NewInstance):
return dummy("new ", parse_descriptor(op.type))
if isinstance(op, instruction.Param):
if isinstance(op, instruction.ThisParam):
return local('this')
return local('p{}'.format(op.v))
if isinstance(op, instruction.StaticExpression):
triple = op.clsdesc[1:-1], op.name, op.ftype
return field_access(triple, parse_descriptor(op.clsdesc))
if isinstance(op, instruction.StaticInstruction):
triple = op.clsdesc[1:-1], op.name, op.ftype
lhs = field_access(triple, parse_descriptor(op.clsdesc))
rhs = visit_expr(op.var_map[op.rhs])
return assignment(lhs, rhs)
if isinstance(op, instruction.SwitchExpression):
return visit_expr(op.var_map[op.src])
if isinstance(op, instruction.UnaryExpression):
lhs = op.var_map.get(op.arg)
if isinstance(op, instruction.CastExpression):
expr = cast(parse_descriptor(op.clsdesc), visit_expr(lhs))
else:
expr = unary_prefix(op.op, visit_expr(lhs))
return parenthesis(expr)
if isinstance(op, instruction.Variable):
# assert(op.declared)
return local('v{}'.format(op.name))
return dummy('???')
def visit_ins(op, isCtor=False):
if isinstance(op, instruction.ReturnInstruction):
expr = None if op.arg is None else visit_expr(op.var_map[op.arg])
return return_stmt(expr)
elif isinstance(op, instruction.ThrowExpression):
return throw_stmt(visit_expr(op.var_map[op.ref]))
elif isinstance(op, instruction.NopExpression):
return None
# Local var decl statements
if isinstance(op, (instruction.AssignExpression, instruction.MoveExpression, instruction.MoveResultExpression)):
lhs = op.var_map.get(op.lhs)
rhs = op.rhs if isinstance(op, instruction.AssignExpression) else op.var_map.get(op.rhs)
if isinstance(lhs, instruction.Variable) and not lhs.declared:
lhs.declared = True
expr = visit_expr(rhs)
return visit_decl(lhs, expr)
# skip this() at top of constructors
if isCtor and isinstance(op, instruction.AssignExpression):
op2 = op.rhs
if op.lhs is None and isinstance(op2, instruction.InvokeInstruction):
if op2.name == '<init>' and len(op2.args) == 0:
if isinstance(op2.var_map[op2.base], instruction.ThisParam):
return None
# MoveExpression is skipped when lhs = rhs
if isinstance(op, instruction.MoveExpression):
if op.var_map.get(op.lhs) is op.var_map.get(op.rhs):
return None
return expression_stmt(visit_expr(op))
class JSONWriter(object):
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.need_break = True
self.constructor = False
self.context = []
# This class is created as a context manager so that it can be used like
# with self as foo:
# ...
# which pushes a statement block on to the context stack and assigns it to foo
# within the with block, all added instructions will be added to foo
def __enter__(self):
self.context.append(statement_block())
return self.context[-1]
def __exit__(self, *args):
self.context.pop()
return False
# Add a statement to the current context
def add(self, val): _append(self.context[-1], val)
def visit_ins(self, op):
self.add(visit_ins(op, isCtor=self.constructor))
# Note: this is a mutating operation
def get_ast(self):
m = self.method
flags = m.access
if 'constructor' in flags:
flags.remove('constructor')
self.constructor = True
params = m.lparams[:]
if 'static' not in m.access:
params = params[1:]
# DAD doesn't create any params for abstract methods
if len(params) != len(m.params_type):
assert('abstract' in flags or 'native' in flags)
assert(not params)
params = range(len(m.params_type))
paramdecls = []
for ptype, name in zip(m.params_type, params):
t = parse_descriptor(ptype)
v = local('p{}'.format(name))
paramdecls.append(var_decl(t, v))
if self.graph is None:
body = None
else:
with self as body:
self.visit_node(self.graph.entry)
return {
'triple': m.triple,
'flags': flags,
'ret': parse_descriptor(m.type),
'params': paramdecls,
'comments': [],
'body': body,
}
def _visit_condition(self, cond):
if cond.isnot:
cond.cond1.neg()
left = parenthesis(self.get_cond(cond.cond1))
right = parenthesis(self.get_cond(cond.cond2))
op = '&&' if cond.isand else '||'
res = binary_infix(op, left, right)
return res
def get_cond(self, node):
if isinstance(node, basic_blocks.ShortCircuitBlock):
return self._visit_condition(node.cond)
elif isinstance(node, basic_blocks.LoopBlock):
return self.get_cond(node.cond)
else:
assert(type(node) == basic_blocks.CondBlock)
assert(len(node.ins) == 1)
return visit_expr(node.ins[-1])
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
if not var.declared:
self.add(visit_decl(var))
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
isDo = cond_expr = body = None
follow = loop.follow['loop']
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
isDo = False
cond_expr = self.get_cond(loop)
elif loop.looptype.is_posttest:
isDo = True
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
isDo = False
cond_expr = literal_bool(True)
with self as body:
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
if loop.looptype.is_pretest:
pass
elif loop.looptype.is_posttest:
self.latch_node.pop()
cond_expr = self.get_cond(loop.latch)
else:
self.visit_node(loop.latch)
assert(cond_expr is not None and isDo is not None)
self.add(loop_stmt(isDo, cond_expr, body))
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
cond_expr = None
scopes = []
follow = cond.follow['if']
if cond.false is cond.true:
self.add(expression_stmt(self.get_cond(cond)))
self.visit_node(cond.true)
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
cond_expr = self.get_cond(cond)
with self as scope:
self.add(jump_stmt('break'))
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
elif follow is not None:
if cond.true in (follow, self.next_case) or\
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if cond.true: # in self.visited_nodes:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
is_else = not (follow in (cond.true, cond.false))
if is_else and not cond.false in self.visited_nodes:
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.if_follow.pop()
self.add(if_stmt(cond_expr, scopes))
self.visit_node(follow)
else:
cond_expr = self.get_cond(cond)
with self as scope:
self.visit_node(cond.true)
scopes.append(scope)
with self as scope:
self.visit_node(cond.false)
scopes.append(scope)
self.add(if_stmt(cond_expr, scopes))
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
cond_expr = visit_expr(switch_ins)
ksv_pairs = []
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
cur_ks = switch.node_to_case[node][:]
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
cur_ks.append(None)
default = None
with self as body:
self.visit_node(node)
if self.need_break:
self.add(jump_stmt('break'))
else:
self.need_break = True
ksv_pairs.append((cur_ks, body))
if default not in (None, follow):
with self as body:
self.visit_node(default)
ksv_pairs.append(([None], body))
self.add(switch_stmt(cond_expr, ksv_pairs))
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.add(jump_stmt('break'))
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
with self as tryb:
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
pairs = []
for catch_node in try_node.catch:
if catch_node.exception_ins:
ins = catch_node.exception_ins
assert(isinstance(ins, instruction.MoveExceptionExpression))
var = ins.var_map[ins.ref]
var.declared = True
ctype = var.get_type()
name = 'v{}'.format(var.name)
else:
ctype = catch_node.catch_type
name = '_'
catch_decl = var_decl(parse_descriptor(ctype), local(name))
with self as body:
self.visit_node(catch_node.catch_start)
pairs.append((catch_decl, body))
self.add(try_stmt(tryb, pairs))
self.visit_node(self.try_follow.pop())
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins) |
muku42/bokeh | refs/heads/master | bokeh/sampledata/sprint.py | 45 | from __future__ import absolute_import, print_function
from os.path import dirname, join
try:
import pandas as pd
except ImportError as e:
raise RuntimeError("sprint data requires pandas (http://pandas.pydata.org) to be installed")
sprint = pd.read_csv(join(dirname(__file__), 'sprint.csv'), skipinitialspace=True, escapechar="\\")
|
nanolearningllc/edx-platform-cypress-2 | refs/heads/master | lms/djangoapps/certificates/migrations/0009_auto__del_field_generatedcertificate_graded_download_url__del_field_ge.py | 188 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.graded_download_url'
db.delete_column('certificates_generatedcertificate', 'graded_download_url')
# Deleting field 'GeneratedCertificate.graded_certificate_id'
db.delete_column('certificates_generatedcertificate', 'graded_certificate_id')
# Adding field 'GeneratedCertificate.distinction'
db.add_column('certificates_generatedcertificate', 'distinction',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
db.create_unique('certificates_generatedcertificate', ['course_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'GeneratedCertificate', fields ['course_id', 'user']
db.delete_unique('certificates_generatedcertificate', ['course_id', 'user_id'])
# Adding field 'GeneratedCertificate.graded_download_url'
db.add_column('certificates_generatedcertificate', 'graded_download_url',
self.gf('django.db.models.fields.CharField')(default=False, max_length=128),
keep_default=False)
# Adding field 'GeneratedCertificate.graded_certificate_id'
db.add_column('certificates_generatedcertificate', 'graded_certificate_id',
self.gf('django.db.models.fields.CharField')(default=False, max_length=32),
keep_default=False)
# Deleting field 'GeneratedCertificate.distinction'
db.delete_column('certificates_generatedcertificate', 'distinction')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
'course_id': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '255'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '128'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grade': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
ecino/compassion-switzerland | refs/heads/10.0-emanuel | sync_mail_multi_attach/controllers/__init__.py | 3 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
# Copyright (C) 2011-today Synconics Technologies Pvt. Ltd. (<http://www.synconics.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import main
|
kvar/ansible | refs/heads/seas_master_2.9.5 | lib/ansible/modules/network/fortios/fortios_web_proxy_profile.py | 13 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_web_proxy_profile
short_description: Configure web proxy profiles in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify web_proxy feature and profile category.
      Examples include all parameters and values, which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
web_proxy_profile:
description:
- Configure web proxy profiles.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
header_client_ip:
description:
- "Action to take on the HTTP client-IP header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_front_end_https:
description:
- "Action to take on the HTTP front-end-HTTPS header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_via_request:
description:
- "Action to take on the HTTP via header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_via_response:
description:
- "Action to take on the HTTP via header in forwarded responses: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_x_authenticated_groups:
description:
- "Action to take on the HTTP x-authenticated-groups header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_x_authenticated_user:
description:
- "Action to take on the HTTP x-authenticated-user header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
header_x_forwarded_for:
description:
- "Action to take on the HTTP x-forwarded-for header in forwarded requests: forwards (pass), adds, or removes the HTTP header."
type: str
choices:
- pass
- add
- remove
headers:
description:
- Configure HTTP forwarded requests headers.
type: list
suboptions:
action:
description:
- Action when HTTP the header forwarded.
type: str
choices:
- add-to-request
- add-to-response
- remove-from-request
- remove-from-response
content:
description:
- HTTP header's content.
type: str
id:
description:
- HTTP forwarded header id.
required: true
type: int
name:
description:
- HTTP forwarded header name.
type: str
log_header_change:
description:
- Enable/disable logging HTTP header changes.
type: str
choices:
- enable
- disable
name:
description:
- Profile name.
required: true
type: str
strip_encoding:
description:
- Enable/disable stripping unsupported encoding from the request header.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure web proxy profiles.
fortios_web_proxy_profile:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
web_proxy_profile:
header_client_ip: "pass"
header_front_end_https: "pass"
header_via_request: "pass"
header_via_response: "pass"
header_x_authenticated_groups: "pass"
header_x_authenticated_user: "pass"
header_x_forwarded_for: "pass"
headers:
-
action: "add-to-request"
content: "<your_own_value>"
id: "13"
name: "default_name_14"
log_header_change: "enable"
name: "default_name_16"
strip_encoding: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_web_proxy_profile_data(json):
option_list = ['header_client_ip', 'header_front_end_https', 'header_via_request',
'header_via_response', 'header_x_authenticated_groups', 'header_x_authenticated_user',
'header_x_forwarded_for', 'headers', 'log_header_change',
'name', 'strip_encoding']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
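# Illustration with a made-up payload: top-level keys are rewritten, e.g.
#   underscore_to_hyphen({'log_header_change': 'enable', 'strip_encoding': 'disable'})
#   -> {'log-header-change': 'enable', 'strip-encoding': 'disable'}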
def web_proxy_profile(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['web_proxy_profile'] and data['web_proxy_profile']:
state = data['web_proxy_profile']['state']
else:
state = True
web_proxy_profile_data = data['web_proxy_profile']
filtered_data = underscore_to_hyphen(filter_web_proxy_profile_data(web_proxy_profile_data))
if state == "present":
return fos.set('web-proxy',
'profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('web-proxy',
'profile',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_web_proxy(data, fos):
if data['web_proxy_profile']:
resp = web_proxy_profile(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"web_proxy_profile": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"header_client_ip": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_front_end_https": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_via_request": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_via_response": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_x_authenticated_groups": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_x_authenticated_user": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"header_x_forwarded_for": {"required": False, "type": "str",
"choices": ["pass", "add", "remove"]},
"headers": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["add-to-request", "add-to-response", "remove-from-request",
"remove-from-response"]},
"content": {"required": False, "type": "str"},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}},
"log_header_change": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"name": {"required": True, "type": "str"},
"strip_encoding": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_web_proxy(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_web_proxy(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
joelowj/NeuralTest | refs/heads/master | neuraltest/main.py | 2 | import os
import tensorflow as tf
import argparse
from cgan import cgan
from utils import *
from function_test import *
def parse_args():
description = "Generative Adversarial Networks for Software Testing"
parser = argparse.ArgumentParser(description=description)
# parser.add_argument('--gan_type', type=str, default='CGAN',
# choices=['CGAN'],
# help='The type of GAN',
# required=True)
parser.add_argument('--epoch', type=int, default=100,
help='The number of epoch to run')
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='The size of batch')
parser.add_argument(
'--checkpoint_dir',
type=str,
default='checkpoint',
help='Directory name to save the checkpoints')
parser.add_argument(
'--result_dir',
type=str,
default='results',
help='Directory name to save the generated vectors')
parser.add_argument(
'--log-dir',
type=str,
default='logs',
help='Directory name to save training logs')
return check_args(parser.parse_args())
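# Example invocation (illustrative values only):
#   python main.py --epoch 200 --batch_size 64 --result_dir results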
def check_args(args):
# --checkpoint_dir
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
# --result_dir
if not os.path.exists(args.result_dir):
os.makedirs(args.result_dir)
# --epoch
try:
assert args.epoch >= 1
except BaseException:
print('Number of epochs must be greater than or equal to one...')
# --batch_size
try:
assert args.batch_size >= 1
except BaseException:
print('Batch size must be larger than or equal to one...')
return args
def main():
# parse arguments
args = parse_args()
if args is None:
exit()
predicate1 = [predicate_100, predicate_101, predicate_102, predicate_103, predicate_104, predicate_105,
predicate_106, predicate_107, predicate_108, predicate_109, predicate_110, predicate_111, predicate_113]
predicate2 = [predicate_200, predicate_201, predicate_202, predicate_203, predicate_204,
predicate_205, predicate_206, predicate_207, predicate_208, predicate_209, predicate_210]
predicate3 = [predicate_300, predicate_301, predicate_302]
# predicate1 = [predicate_113]
# predicate2 = []
# predicate3 = []
predicate_array = []
predicate_array.append(predicate1)
predicate_array.append(predicate2)
predicate_array.append(predicate3)
neurons_per_layer = [512, 256, 64]
# open session
for i in range(len(predicate_array)):
for predicate in predicate_array[i]:
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
gan = cgan(
sess,
epoch=args.epoch,
batch_size=args.batch_size,
predicate = predicate,
neurons_per_layer = neurons_per_layer,
checkpoint_dir=args.checkpoint_dir,
result_dir=args.result_dir,
log_dir=args.log_dir,
input_size=i+1)
# build graph
gan.construct_model()
# show network architecture
# show_all_variables()
# launch the graph in the session
gan.train()
print("[*] Training completed!\n")
if __name__ == '__main__':
main()
|
nwjs/chromium.src | refs/heads/nw45-log | build/toolchain/win/rc/rc.py | 3 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""usage: rc.py [options] input.res
A resource compiler for .rc files.
options:
-h, --help Print this message.
-I<dir> Add include path, used for both headers and resources.
-imsvc<dir> Add system include path, used for preprocessing only.
-D<sym> Define a macro for the preprocessor.
/fo<out> Set path of output .res file.
/nologo Ignored (rc.py doesn't print a logo by default).
/showIncludes Print referenced header and resource files."""
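# Illustrative invocation (made-up paths and macros):
#   rc.py -I../.. -DNDEBUG /foout/my_resources.res my_resources.rc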
from __future__ import print_function
from collections import namedtuple
import codecs
import os
import re
import subprocess
import sys
import tempfile
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = \
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR))))
def ParseFlags():
"""Parses flags off sys.argv and returns the parsed flags."""
# Can't use optparse / argparse because of /fo flag :-/
includes = []
imsvcs = []
defines = []
output = None
input = None
show_includes = False
# Parse.
for flag in sys.argv[1:]:
if flag == '-h' or flag == '--help':
print(__doc__)
sys.exit(0)
if flag.startswith('-I'):
includes.append(flag)
elif flag.startswith('-imsvc'):
imsvcs.append(flag)
elif flag.startswith('-D'):
defines.append(flag)
elif flag.startswith('/fo'):
if output:
print('rc.py: error: multiple /fo flags', '/fo' + output, flag,
file=sys.stderr)
sys.exit(1)
output = flag[3:]
elif flag == '/nologo':
pass
elif flag == '/showIncludes':
show_includes = True
elif (flag.startswith('-') or
(flag.startswith('/') and not os.path.exists(flag))):
print('rc.py: error: unknown flag', flag, file=sys.stderr)
print(__doc__, file=sys.stderr)
sys.exit(1)
else:
if input:
print('rc.py: error: multiple inputs:', input, flag, file=sys.stderr)
sys.exit(1)
input = flag
# Validate and set default values.
if not input:
print('rc.py: error: no input file', file=sys.stderr)
sys.exit(1)
if not output:
output = os.path.splitext(input)[0] + '.res'
Flags = namedtuple('Flags', ['includes', 'defines', 'output', 'imsvcs',
'input', 'show_includes'])
return Flags(includes=includes, defines=defines, output=output, imsvcs=imsvcs,
input=input, show_includes=show_includes)
def ReadInput(input):
""""Reads input and returns it. For UTF-16LEBOM input, converts to UTF-8."""
# Microsoft's rc.exe only supports unicode in the form of UTF-16LE with a BOM.
# Our rc binary sniffs for UTF-16LE. If that's not found, if /utf-8 is
# passed, the input is treated as UTF-8. If /utf-8 is not passed and the
# input is not UTF-16LE, then our rc errors out on characters outside of
# 7-bit ASCII. Since the driver always converts UTF-16LE to UTF-8 here (for
# the preprocessor, which doesn't support UTF-16LE), our rc will either see
# UTF-8 with the /utf-8 flag (for UTF-16LE input), or ASCII input.
# This is compatible with Microsoft rc.exe. If we wanted, we could expose
# a /utf-8 flag for the driver for UTF-8 .rc inputs too.
# TODO(thakis): Microsoft's rc.exe supports BOM-less UTF-16LE. We currently
# don't, but for chrome it currently doesn't matter.
is_utf8 = False
try:
with open(input, 'rb') as rc_file:
rc_file_data = rc_file.read()
if rc_file_data.startswith(codecs.BOM_UTF16_LE):
rc_file_data = rc_file_data[2:].decode('utf-16le').encode('utf-8')
is_utf8 = True
except IOError:
print('rc.py: failed to open', input, file=sys.stderr)
sys.exit(1)
except UnicodeDecodeError:
print('rc.py: failed to decode UTF-16 despite BOM', input, file=sys.stderr)
sys.exit(1)
return rc_file_data, is_utf8
def Preprocess(rc_file_data, flags):
"""Runs the input file through the preprocessor."""
clang = os.path.join(SRC_DIR, 'third_party', 'llvm-build',
'Release+Asserts', 'bin', 'clang-cl')
# Let preprocessor write to a temp file so that it doesn't interfere
# with /showIncludes output on stdout.
if sys.platform == 'win32':
clang += '.exe'
temp_handle, temp_file = tempfile.mkstemp(suffix='.i')
# Closing temp_handle immediately defeats the purpose of mkstemp(), but I
# can't figure out how to let write to the temp file on Windows otherwise.
os.close(temp_handle)
clang_cmd = [clang, '/P', '/DRC_INVOKED', '/TC', '-', '/Fi' + temp_file]
if flags.imsvcs:
clang_cmd += ['/X']
if os.path.dirname(flags.input):
# This must precede flags.includes.
clang_cmd.append('-I' + os.path.dirname(flags.input))
if flags.show_includes:
clang_cmd.append('/showIncludes')
clang_cmd += flags.imsvcs + flags.includes + flags.defines
p = subprocess.Popen(clang_cmd, stdin=subprocess.PIPE)
p.communicate(input=rc_file_data)
if p.returncode != 0:
sys.exit(p.returncode)
preprocessed_output = open(temp_file, 'rb').read()
os.remove(temp_file)
# rc.exe has a wacko preprocessor:
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa381033(v=vs.85).aspx
# """RC treats files with the .c and .h extensions in a special manner. It
# assumes that a file with one of these extensions does not contain
# resources. If a file has the .c or .h file name extension, RC ignores all
# lines in the file except the preprocessor directives."""
# Thankfully, the Microsoft headers are mostly good about putting everything
# in the system headers behind `if !defined(RC_INVOKED)`, so regular
# preprocessing with RC_INVOKED defined works.
return preprocessed_output
def RunRc(preprocessed_output, is_utf8, flags):
if sys.platform.startswith('linux'):
rc = os.path.join(THIS_DIR, 'linux64', 'rc')
elif sys.platform == 'darwin':
rc = os.path.join(THIS_DIR, 'mac', 'rc')
elif sys.platform == 'win32':
rc = os.path.join(THIS_DIR, 'win', 'rc.exe')
else:
print('rc.py: error: unsupported platform', sys.platform, file=sys.stderr)
sys.exit(1)
rc_cmd = [rc]
# Make sure rc-relative resources can be found:
if os.path.dirname(flags.input):
rc_cmd.append('/cd' + os.path.dirname(flags.input))
rc_cmd.append('/fo' + flags.output)
if is_utf8:
rc_cmd.append('/utf-8')
# TODO(thakis): cl currently always prints full paths for /showIncludes,
# but clang-cl /P doesn't. Which one is right?
if flags.show_includes:
rc_cmd.append('/showIncludes')
# Microsoft rc.exe searches for referenced files relative to -I flags in
# addition to the pwd, so -I flags need to be passed both to both
# the preprocessor and rc.
rc_cmd += flags.includes
p = subprocess.Popen(rc_cmd, stdin=subprocess.PIPE)
p.communicate(input=preprocessed_output)
if flags.show_includes and p.returncode == 0:
TOOL_DIR = os.path.dirname(os.path.relpath(THIS_DIR)).replace("\\", "/")
# Since tool("rc") can't have deps, add deps on this script and on rc.py
# and its deps here, so that rc edges become dirty if rc.py changes.
print('Note: including file: {}/tool_wrapper.py'.format(TOOL_DIR))
print('Note: including file: {}/rc/rc.py'.format(TOOL_DIR))
print(
'Note: including file: {}/rc/linux64/rc.sha1'.format(TOOL_DIR))
print('Note: including file: {}/rc/mac/rc.sha1'.format(TOOL_DIR))
print(
'Note: including file: {}/rc/win/rc.exe.sha1'.format(TOOL_DIR))
return p.returncode
def CompareToMsRcOutput(preprocessed_output, is_utf8, flags):
msrc_in = flags.output + '.preprocessed.rc'
# Strip preprocessor line markers.
preprocessed_output = re.sub(br'^#.*$', b'', preprocessed_output, flags=re.M)
if is_utf8:
preprocessed_output = preprocessed_output.decode('utf-8').encode('utf-16le')
with open(msrc_in, 'wb') as f:
f.write(preprocessed_output)
msrc_out = flags.output + '_ms_rc'
msrc_cmd = ['rc', '/nologo', '/x', '/fo' + msrc_out]
# Make sure rc-relative resources can be found. rc.exe looks for external
# resource files next to the file, but the preprocessed file isn't where the
# input was.
# Note that rc searches external resource files in the order of
# 1. next to the input file
# 2. relative to cwd
# 3. next to -I directories
# Changing the cwd means we'd have to rewrite all -I flags, so just add
# the input file dir as -I flag. That technically gets the order of 1 and 2
# wrong, but in Chromium's build the cwd is the gn out dir, and generated
# files there are in obj/ and gen/, so this difference doesn't matter in
# practice.
if os.path.dirname(flags.input):
msrc_cmd += [ '-I' + os.path.dirname(flags.input) ]
# Microsoft rc.exe searches for referenced files relative to -I flags in
# addition to the pwd, so -I flags need to be passed both to both
# the preprocessor and rc.
msrc_cmd += flags.includes
# Input must come last.
msrc_cmd += [ msrc_in ]
rc_exe_exit_code = subprocess.call(msrc_cmd)
# Assert Microsoft rc.exe and rc.py produced identical .res files.
if rc_exe_exit_code == 0:
import filecmp
assert filecmp.cmp(msrc_out, flags.output)
return rc_exe_exit_code
def main():
# This driver has to do these things:
# 1. Parse flags.
# 2. Convert the input from UTF-16LE to UTF-8 if needed.
# 3. Pass the input through a preprocessor (and clean up the preprocessor's
# output in minor ways).
# 4. Call rc for the heavy lifting.
flags = ParseFlags()
rc_file_data, is_utf8 = ReadInput(flags.input)
preprocessed_output = Preprocess(rc_file_data, flags)
rc_exe_exit_code = RunRc(preprocessed_output, is_utf8, flags)
# 5. On Windows, we also call Microsoft's rc.exe and check that we produced
# the same output.
# Since Microsoft's rc has a preprocessor that only accepts 32 characters
# for macro names, feed the clang-preprocessed source into it instead
# of using ms rc's preprocessor.
if sys.platform == 'win32' and rc_exe_exit_code == 0:
rc_exe_exit_code = CompareToMsRcOutput(preprocessed_output, is_utf8, flags)
return rc_exe_exit_code
if __name__ == '__main__':
sys.exit(main())
|