repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
stringlengths 5..100 | stringlengths 4..299 | stringclasses 990 values | stringlengths 4..7 | stringlengths 666..1.03M | stringclasses 15 values | int64 -9,223,351,895,964,839,000..9,223,297,778B | float64 3.17..100 | int64 7..1k | float64 0.25..0.98 | bool 1 class
---|---|---|---|---|---|---|---|---|---|---
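For orientation, here is a minimal sketch of one record of this dump parsed into the column schema above. It is illustrative only: the values are copied from the first row below, the content string is truncated, and the column meanings in the comments are inferred from the column names.

record = {
    "repo_name": "kingvuplus/TT-gui",
    "path": "lib/python/Plugins/SystemPlugins/NetworkWizard/NetworkWizard.py",
    "copies": "8",                   # a stringlengths column, so kept as a string
    "size": "13872",                 # file size, also stored as a string
    "content": "from Screens.Wizard import wizardManager, WizardSummary\n...",
    "license": "gpl-2.0",
    "hash": -399704221532327740,     # int64 content hash
    "line_mean": 35.601583,          # presumably the mean line length of the file
    "line_max": 207,                 # presumably the longest line in the file
    "alpha_frac": 0.717488,          # presumably the fraction of alphabetic characters
    "autogenerated": False,
}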
kingvuplus/TT-gui | lib/python/Plugins/SystemPlugins/NetworkWizard/NetworkWizard.py | 8 | 13872 |
from Screens.Wizard import wizardManager, WizardSummary
from Screens.WizardLanguage import WizardLanguage
from Screens.Rc import Rc
from Screens.MessageBox import MessageBox
from Components.Pixmap import Pixmap, MovingPixmap, MultiPixmap
from Components.Sources.Boolean import Boolean
from Components.Network import iNetwork
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE
from enigma import eTimer
from os import system
class NetworkWizard(WizardLanguage, Rc):
skin = """
<screen position="0,0" size="720,576" title="Welcome..." flags="wfNoBorder" >
<widget name="text" position="153,40" size="340,300" font="Regular;22" />
<widget source="list" render="Listbox" position="53,340" size="440,180" scrollbarMode="showOnDemand" >
<convert type="StringList" />
</widget>
<widget name="config" position="53,340" zPosition="1" size="440,180" transparent="1" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/buttons/button_red.png" position="40,225" zPosition="0" size="15,16" transparent="1" alphatest="on" />
<widget name="languagetext" position="55,225" size="95,30" font="Regular;18" />
<widget name="wizard" pixmap="skin_default/wizard.png" position="40,50" zPosition="10" size="110,174" alphatest="on" />
<widget name="rc" pixmaps="skin_default/rc.png,skin_default/rcold.png" position="500,50" zPosition="10" size="154,500" alphatest="on" />
<widget name="arrowdown" pixmap="skin_default/arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowdown2" pixmap="skin_default/arrowdown.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget name="arrowup2" pixmap="skin_default/arrowup.png" position="-100,-100" zPosition="11" size="37,70" alphatest="on" />
<widget source="VKeyIcon" render="Pixmap" pixmap="skin_default/buttons/key_text.png" position="40,260" zPosition="0" size="35,25" transparent="1" alphatest="on" >
<convert type="ConditionalShowHide" />
</widget>
<widget name="HelpWindow" pixmap="skin_default/buttons/key_text.png" position="125,170" zPosition="1" size="1,1" transparent="1" alphatest="on" />
</screen>"""
def __init__(self, session, interface = None):
self.xmlfile = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/NetworkWizard/networkwizard.xml")
WizardLanguage.__init__(self, session, showSteps = False, showStepSlider = False)
Rc.__init__(self)
self.session = session
self["wizard"] = Pixmap()
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self.InstalledInterfaceCount = None
self.Adapterlist = None
self.InterfaceState = None
self.isInterfaceUp = None
self.WlanPluginInstalled = False
self.ap = None
self.w = None
if interface is not None:
self.selectedInterface = interface
else:
self.selectedInterface = None
self.NextStep = None
self.resetRef = None
self.checkRef = None
self.AdapterRef = None
self.APList = None
self.newAPlist = None
self.oldlist = None
self.originalInterfaceState = {}
self.originalInterfaceStateChanged = False
self.Text = None
self.rescanTimer = eTimer()
self.rescanTimer.callback.append(self.rescanTimerFired)
self.getInstalledInterfaceCount()
self.isWlanPluginInstalled()
def exitWizardQuestion(self, ret = False):
if (ret):
self.markDone()
self.close()
def markDone(self):
self.stopScan()
del self.rescanTimer
self.checkOldInterfaceState()
self.exit()
pass
def back(self):
self.stopScan()
self.ap = None
WizardLanguage.back(self)
def stopScan(self):
self.rescanTimer.stop()
if self.w is not None:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.stopGetNetworkList()
self.w = None
def getInstalledInterfaceCount(self):
self.originalInterfaceState = {}
self.Adapterlist = iNetwork.getAdapterList()
self.InstalledInterfaceCount = len(self.Adapterlist)
if self.Adapterlist is not None:
if self.InstalledInterfaceCount == 1 and self.selectedInterface is None:
self.selectedInterface = self.Adapterlist[0]
for interface in iNetwork.getAdapterList():
self.originalInterfaceState[interface] = {}
self.originalInterfaceState[interface]["up"] = iNetwork.getAdapterAttribute(interface, 'up')
def selectInterface(self):
self.InterfaceState = None
if self.selectedInterface is None:
if self.InstalledInterfaceCount <= 1:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
else:
self.NextStep = 'selectinterface'
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
if not iNetwork.isWirelessInterface(self.selectedInterface):
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
self.checkInterface(self.selectedInterface)
def checkOldInterfaceState(self):
		# Take the interface down again if it was originally down and the config is unchanged.
if self.originalInterfaceStateChanged is False:
for interface in self.originalInterfaceState.keys():
if interface == self.selectedInterface:
if self.originalInterfaceState[interface]["up"] is False:
if iNetwork.checkforInterface(interface) is True:
system("ifconfig " + interface + " down")
def listInterfaces(self):
self.checkOldInterfaceState()
list = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
list.append((_("Exit network wizard"), "end"))
return list
def InterfaceSelectionMade(self, index):
self.selectedInterface = index
self.InterfaceSelect(index)
def InterfaceSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'eth0':
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def InterfaceSelectionMoved(self):
self.InterfaceSelect(self.selection)
def checkInterface(self,iface):
self.stopScan()
if self.Adapterlist is None:
self.Adapterlist = iNetwork.getAdapterList()
		if self.NextStep != 'end':
if len(self.Adapterlist) == 0:
				# Reset the network to defaults if the network is broken
iNetwork.resetNetworkConfig('lan', self.resetNetworkConfigCB)
self.resetRef = self.session.openWithCallback(self.resetNetworkConfigFinished, MessageBox, _("Please wait while we prepare your network interfaces..."), type = MessageBox.TYPE_INFO, enable_input = False)
if iface in iNetwork.getInstalledAdapters():
if iface in iNetwork.configuredNetworkAdapters and len(iNetwork.configuredNetworkAdapters) == 1:
if iNetwork.getAdapterAttribute(iface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.isInterfaceUp = iNetwork.checkforInterface(iface)
self.currStep = self.getStepWithID(self.NextStep)
self.afterAsyncCode()
else:
self.resetNetworkConfigFinished(False)
	def resetNetworkConfigFinished(self, data):
		# Both the success and failure paths continue to the same next step.
		self.currStep = self.getStepWithID(self.NextStep)
		self.afterAsyncCode()
def resetNetworkConfigCB(self,callback,iface):
if callback is not None:
if callback is True:
iNetwork.getInterfaces(self.getInterfacesFinished)
def getInterfacesFinished(self, data):
if data is True:
if iNetwork.getAdapterAttribute(self.selectedInterface, 'up') is True:
self.isInterfaceUp = True
else:
self.isInterfaceUp = False
self.resetRef.close(True)
else:
print "we should never come here!"
def AdapterSetupEnd(self, iface):
self.originalInterfaceStateChanged = True
if iNetwork.getAdapterAttribute(iface, "dhcp") is True:
iNetwork.checkNetworkState(self.AdapterSetupEndFinished)
self.AdapterRef = self.session.openWithCallback(self.AdapterSetupEndCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
else:
self.currStep = self.getStepWithID("confdns")
self.afterAsyncCode()
def AdapterSetupEndCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled == True:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def AdapterSetupEndFinished(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.AdapterRef.close(True)
def checkWlanStateCB(self,data,status):
if data is not None:
if data is True:
if status is not None:
text1 = _("Your receiver is now ready to be used.\n\nYour internet connection is working.\n\n")
text2 = _('Accesspoint:') + "\t" + str(status[self.selectedInterface]["accesspoint"]) + "\n"
text3 = _('SSID:') + "\t" + str(status[self.selectedInterface]["essid"]) + "\n"
text4 = _('Link quality:') + "\t" + str(status[self.selectedInterface]["quality"])+ "\n"
text5 = _('Signal strength:') + "\t" + str(status[self.selectedInterface]["signal"]) + "\n"
text6 = _('Bitrate:') + "\t" + str(status[self.selectedInterface]["bitrate"]) + "\n"
text7 = _('Encryption:') + " " + str(status[self.selectedInterface]["encryption"]) + "\n"
text8 = _("Please press OK to continue.")
					infotext = text1 + text2 + text3 + text4 + text5 + text6 + text7 + "\n" + text8
self.currStep = self.getStepWithID("checkWlanstatusend")
self.Text = infotext
if str(status[self.selectedInterface]["accesspoint"]) == "Not-Associated":
self.InterfaceState = False
self.afterAsyncCode()
def checkNetwork(self):
iNetwork.checkNetworkState(self.checkNetworkStateCB)
self.checkRef = self.session.openWithCallback(self.checkNetworkCB, MessageBox, _("Please wait while we test your network..."), type = MessageBox.TYPE_INFO, enable_input = False)
def checkNetworkCB(self,data):
if data is True:
if iNetwork.isWirelessInterface(self.selectedInterface):
if self.WlanPluginInstalled == True:
from Plugins.SystemPlugins.WirelessLan.Wlan import iStatus
iStatus.getDataForInterface(self.selectedInterface,self.checkWlanStateCB)
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
else:
self.currStep = self.getStepWithID("checklanstatusend")
self.afterAsyncCode()
def checkNetworkStateCB(self,data):
if data <= 2:
self.InterfaceState = True
else:
self.InterfaceState = False
self.checkRef.close(True)
def rescanTimerFired(self):
self.rescanTimer.stop()
self.updateAPList()
def updateAPList(self):
self.oldlist = self.APList
self.newAPlist = []
newList = []
newListIndex = None
currentListEntry = None
newList = self.listAccessPoints()
for oldentry in self.oldlist:
if oldentry not in newList:
newList.append(oldentry)
for newentry in newList:
self.newAPlist.append(newentry)
if len(self.newAPlist):
if (self.wizard[self.currStep].has_key("dynamiclist")):
currentListEntry = self["list"].getCurrent()
if currentListEntry is not None:
idx = 0
for entry in self.newAPlist:
if entry == currentListEntry:
newListIndex = idx
idx +=1
self.wizard[self.currStep]["evaluatedlist"] = self.newAPlist
self['list'].setList(self.newAPlist)
if newListIndex is not None:
self["list"].setIndex(newListIndex)
self["list"].updateList(self.newAPlist)
def listAccessPoints(self):
self.APList = []
if self.WlanPluginInstalled is False:
self.APList.append( ( _("No networks found"), None ) )
else:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
iWlan.setInterface(self.selectedInterface)
self.w = iWlan.getInterface()
aps = iWlan.getNetworkList()
if aps is not None:
print "[NetworkWizard.py] got Accespoints!"
tmplist = []
complist = []
for ap in aps:
a = aps[ap]
if a['active']:
tmplist.append( (a['bssid'], a['essid']) )
complist.append( (a['bssid'], a['essid']) )
for entry in tmplist:
if entry[1] == "":
for compentry in complist:
if compentry[0] == entry[0]:
complist.remove(compentry)
for entry in complist:
self.APList.append( (entry[1], entry[1]) )
if not len(aps):
self.APList.append( ( _("No networks found"), None ) )
self.rescanTimer.start(4000)
return self.APList
def AccessPointsSelectionMoved(self):
self.ap = self.selection
self.NextStep = 'wlanconfig'
def checkWlanSelection(self):
self.stopScan()
self.currStep = self.getStepWithID(self.NextStep)
def isWlanPluginInstalled(self):
try:
from Plugins.SystemPlugins.WirelessLan.Wlan import iWlan
except ImportError:
self.WlanPluginInstalled = False
else:
self.WlanPluginInstalled = True
def listChoices(self):
self.stopScan()
list = []
if self.WlanPluginInstalled == True:
list.append((_("Configure your wireless LAN again"), "scanwlan"))
list.append((_("Configure your internal LAN"), "nwconfig"))
list.append((_("Exit network wizard"), "end"))
return list
def ChoicesSelectionMade(self, index):
self.ChoicesSelect(index)
def ChoicesSelect(self, index):
if index == 'end':
self.NextStep = 'end'
elif index == 'nwconfig':
self.selectedInterface = "eth0"
self.NextStep = 'nwconfig'
else:
self.NextStep = 'asknetworktype'
def ChoicesSelectionMoved(self):
pass
| gpl-2.0 | -399,704,221,532,327,740 | 35.601583 | 207 | 0.717488 | false |
google-research/ott | tests/core/sinkhorn_unbalanced_test.py | 1 | 2940 |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for the Policy."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import jax.test_util
from ott.core import sinkhorn
from ott.geometry import pointcloud
class SinkhornUnbalancedTest(jax.test_util.JaxTestCase):
def setUp(self):
super().setUp()
self.rng = jax.random.PRNGKey(0)
self.dim = 4
self.n = 68
self.m = 123
self.rng, *rngs = jax.random.split(self.rng, 5)
self.x = jax.random.uniform(rngs[0], (self.n, self.dim))
self.y = jax.random.uniform(rngs[1], (self.m, self.dim))
a = jax.random.uniform(rngs[2], (self.n,))
b = jax.random.uniform(rngs[3], (self.m,))
self.a = a / jnp.sum(a)
self.b = b / jnp.sum(b)
@parameterized.named_parameters(
dict(
testcase_name='lse-no-mom',
lse_mode=True,
momentum=1.0,
inner_iterations=10,
norm_error=1,
tau_a=0.8,
tau_b=0.9
),
dict(
testcase_name='lse-high-mom',
lse_mode=True,
momentum=1.5,
inner_iterations=10,
norm_error=1,
tau_a=0.8,
tau_b=0.9
),
dict(
testcase_name='scal-no-mom',
lse_mode=False,
momentum=1.0,
inner_iterations=10,
norm_error=1,
tau_a=0.8,
tau_b=0.9
),
dict(
testcase_name='scal-high-mom',
lse_mode=False,
momentum=1.5,
inner_iterations=10,
norm_error=1,
tau_a=0.8,
tau_b=0.9
))
def test_euclidean_point_cloud(self, lse_mode, momentum,
inner_iterations, norm_error, tau_a, tau_b):
"""Two point clouds, tested with various parameters."""
threshold = 1e-3
geom = pointcloud.PointCloud(self.x, self.y, epsilon=0.1)
errors = sinkhorn.sinkhorn(
geom,
a=self.a,
b=self.b,
threshold=threshold,
momentum=momentum,
inner_iterations=inner_iterations,
norm_error=norm_error,
lse_mode=lse_mode,
tau_a=tau_a,
tau_b=tau_b).errors
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
self.assertGreater(err, 0)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 5,401,304,138,509,941,000 | 27.269231 | 77 | 0.591497 | false |
shibanis1/spark-tk | python/sparktk/models/classification/random_forest_classifier.py | 1 | 9387 |
from sparktk.loggers import log_load; log_load(__name__); del log_load
from sparktk.propobj import PropertiesObject
from sparktk.frame.ops.classification_metrics_value import ClassificationMetricsValue
import os
def train(frame,
label_column,
observation_columns,
num_classes = 2,
num_trees = 1,
impurity = "gini",
max_depth = 4,
max_bins = 100,
seed = None,
categorical_features_info = None,
feature_subset_category = None):
"""
Creates a Random Forest Classifier Model by training on the given frame
Parameters
----------
    :param frame: (Frame) A frame of training data
:param label_column: (str) Column name containing the label for each observation
:param observation_columns: (list(str)) Column(s) containing the observations
:param num_classes: (int) Number of classes for classification. Default is 2
    :param num_trees: (int) Number of trees in the random forest. Default is 1
:param impurity: (str) Criterion used for information gain calculation. Supported values "gini" or "entropy".
Default is "gini"
:param max_depth: (int) Maximum depth of the tree. Default is 4
:param max_bins: (int) Maximum number of bins used for splitting features. Default is 100
:param seed: (Optional(int)) Random seed for bootstrapping and choosing feature subsets. Default is a randomly chosen seed
    :param categorical_features_info: (Optional(Dict(Int -> Int))) Arity of categorical features. Entry (n -> k) indicates that feature 'n' is categorical
with 'k' categories indexed from 0:{0,1,...,k-1}
:param feature_subset_category: (Optional(str)) Number of features to consider for splits at each node.
Supported values "auto","all","sqrt","log2","onethird".
If "auto" is set, this is based on num_trees: if num_trees == 1, set to "all"
; if num_trees > 1, set to "sqrt"
:return: (RandomForestClassifierModel) The trained random forest classifier model
Notes
-----
Random Forest is a supervised ensemble learning algorithm which can be used to perform binary and
multi-class classification. The Random Forest Classifier model is initialized, trained on columns of a frame,
used to predict the labels of observations in a frame, and tests the predicted labels against the true labels.
This model runs the MLLib implementation of Random Forest. During training, the decision trees are trained
in parallel. During prediction, each tree's prediction is counted as vote for one class. The label is predicted
to be the class which receives the most votes. During testing, labels of the observations are predicted and
tested against the true labels using built-in binary and multi-class Classification Metrics.
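    For intuition only (this is an illustrative sketch, not the MLLib internals),
    the voting step reduces to an argmax over per-class vote counts; 'trees' and
    'observation' here are hypothetical names::

        votes = {}                                   # class label -> vote count
        for tree in trees:
            label = tree.predict(observation)        # each tree casts one vote
            votes[label] = votes.get(label, 0) + 1
        predicted_class = max(votes, key=votes.get)  # majority wins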
"""
tc = frame._tc
_scala_obj = get_scala_obj(tc)
seed = int(os.urandom(2).encode('hex'), 16) if seed is None else seed
scala_model = _scala_obj.train(frame._scala,
label_column,
tc.jutils.convert.to_scala_list_string(observation_columns),
num_classes,
num_trees,
impurity,
max_depth,
max_bins,
seed,
__get_categorical_features_info(tc, categorical_features_info),
tc.jutils.convert.to_scala_option(feature_subset_category))
return RandomForestClassifierModel(tc, scala_model)
def __get_categorical_features_info(tc, c):
if c is not None:
c = tc.jutils.convert.to_scala_map(c)
return tc.jutils.convert.to_scala_option(c)
def get_scala_obj(tc):
"""Gets reference to the scala object"""
return tc.sc._jvm.org.trustedanalytics.sparktk.models.classification.random_forest_classifier.RandomForestClassifierModel
class RandomForestClassifierModel(PropertiesObject):
"""
A trained Random Forest Classifier model
Example
-------
>>> frame = tc.frame.create([[1,19.8446136104,2.2985856384],[1,16.8973559126,2.6933495054],
... [1,5.5548729596,2.7777687995],[0,46.1810010826,3.1611961917],
... [0,44.3117586448,3.3458963222],[0,34.6334526911,3.6429838715]],
... [('Class', int), ('Dim_1', float), ('Dim_2', float)])
>>> frame.inspect()
[#] Class Dim_1 Dim_2
=======================================
[0] 1 19.8446136104 2.2985856384
[1] 1 16.8973559126 2.6933495054
[2] 1 5.5548729596 2.7777687995
[3] 0 46.1810010826 3.1611961917
[4] 0 44.3117586448 3.3458963222
[5] 0 34.6334526911 3.6429838715
>>> model = tc.models.classification.random_forest_classifier.train(frame, 'Class', ['Dim_1', 'Dim_2'], num_classes=2, num_trees=1, impurity="entropy", max_depth=4, max_bins=100)
>>> model.predict(frame, ['Dim_1', 'Dim_2'])
>>> frame.inspect()
[#] Class Dim_1 Dim_2 predicted_class
========================================================
[0] 1 19.8446136104 2.2985856384 1
[1] 1 16.8973559126 2.6933495054 1
[2] 1 5.5548729596 2.7777687995 1
[3] 0 46.1810010826 3.1611961917 0
[4] 0 44.3117586448 3.3458963222 0
[5] 0 34.6334526911 3.6429838715 0
>>> test_metrics = model.test(frame, ['Dim_1','Dim_2'])
>>> test_metrics
accuracy = 1.0
confusion_matrix = Predicted_Pos Predicted_Neg
Actual_Pos 3 0
Actual_Neg 0 3
f_measure = 1.0
precision = 1.0
recall = 1.0
>>> model.save("sandbox/randomforestclassifier")
>>> restored = tc.load("sandbox/randomforestclassifier")
>>> restored.label_column == model.label_column
True
>>> restored.seed == model.seed
True
>>> set(restored.observation_columns) == set(model.observation_columns)
True
"""
def __init__(self, tc, scala_model):
self._tc = tc
tc.jutils.validate_is_jvm_instance_of(scala_model, get_scala_obj(tc))
self._scala = scala_model
@staticmethod
def load(tc, scala_model):
"""Loads a random forest classifier model from a scala model"""
return RandomForestClassifierModel(tc, scala_model)
@property
def label_column(self):
"""column containing the label used for model training"""
return self._scala.labelColumn()
@property
def observation_columns(self):
"""observation columns used for model training"""
return self._tc.jutils.convert.from_scala_seq(self._scala.observationColumns())
@property
def num_classes(self):
"""number of classes in the trained model"""
return self._scala.numClasses()
@property
def num_trees(self):
"""number of trees in the trained model"""
return self._scala.numTrees()
@property
def impurity(self):
"""impurity value of the trained model"""
return self._scala.impurity()
@property
def max_depth(self):
"""maximum depth of the trained model"""
return self._scala.maxDepth()
@property
def max_bins(self):
"""maximum bins in the trained model"""
return self._scala.maxBins()
@property
def seed(self):
"""seed used during training of the model"""
return self._scala.seed()
@property
def categorical_features_info(self):
"""categorical feature dictionary used during model training"""
s = self._tc.jutils.convert.from_scala_option(self._scala.categoricalFeaturesInfo())
if s:
return self._tc.jutils.convert.scala_map_to_python(s)
return None
@property
def feature_subset_category(self):
"""feature subset category of the trained model"""
return self._tc.jutils.convert.from_scala_option(self._scala.featureSubsetCategory())
def predict(self, frame, columns=None):
"""predict the frame given the trained model"""
c = self.__columns_to_option(columns)
self._scala.predict(frame._scala, c)
def test(self, frame, columns=None):
"""test the frame given the trained model"""
c = self.__columns_to_option(columns)
return ClassificationMetricsValue(self._tc, self._scala.test(frame._scala, c))
def __columns_to_option(self, c):
if c is not None:
c = self._tc.jutils.convert.to_scala_list_string(c)
return self._tc.jutils.convert.to_scala_option(c)
def save(self, path):
"""save the trained model to path"""
self._scala.save(self._tc._scala_sc, path)
del PropertiesObject
| apache-2.0 | 7,923,857,274,872,778,000 | 40.535398 | 186 | 0.587302 | false |
sysadminmatmoz/OCB | addons/warning/warning.py | 6 | 10871 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api
from openerp.osv import fields,osv
from openerp.tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will raise an exception with the message and block the flow. The message has to be written in the next field.')
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sales Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
class sale_order(osv.Model):
_inherit = 'sale.order'
@api.onchange('partner_id')
def onchange_partner_id_warning(self):
if not self.partner_id:
return
warning = {}
title = False
message = False
partner = self.partner_id
# If partner has no warning, check its company
if partner.sale_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.sale_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.sale_warn != 'block' and partner.parent_id and partner.parent_id.sale_warn == 'block':
partner = partner.parent_id
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
if self.partner_id.sale_warn == 'block':
self.update({'partner_id': False, 'partner_invoice_id': False, 'partner_shipping_id': False, 'pricelist_id': False})
return {'warning': warning}
if warning:
return {'warning': warning}
class purchase_order(osv.Model):
_inherit = 'purchase.order'
@api.onchange('partner_id')
def onchange_partner_id_warning(self):
if not self.partner_id:
return
result = {}
warning = {}
title = False
message = False
partner = self.partner_id
# If partner has no warning, check its company
if partner.purchase_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.purchase_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.purchase_warn != 'block' and partner.parent_id and partner.parent_id.purchase_warn == 'block':
partner = partner.parent_id
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
if partner.purchase_warn == 'block':
self.update({'partner_id': False})
return {'warning': warning}
if warning:
result['warning'] = warning
return result
class account_invoice(osv.osv):
_inherit = 'account.invoice'
@api.onchange('partner_id', 'company_id')
def _onchange_partner_id(self):
result = super(account_invoice, self)._onchange_partner_id()
partner = self.partner_id
res = {}
if not partner:
self.account_id = False
self.payment_term_id = False
return result
# If partner has no warning, check its company
if partner.invoice_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.invoice_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.invoice_warn != 'block' and partner.parent_id and partner.parent_id.invoice_warn == 'block':
partner = partner.parent_id
res['warning'] = {
'title': _("Warning for %s") % partner.name,
'message': partner.invoice_warn_msg
}
if partner.invoice_warn == 'block':
self.partner_id = False
return res
return result
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_picking_type(self, cr, uid, ids, picking_type_id, partner_id, context=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
warning = {}
title = False
message = False
# If partner has no warning, check its company
if partner.picking_warn == 'no-message' and partner.parent_id:
partner = partner.parent_id
if partner.picking_warn != 'no-message':
# Block if partner only has warning but parent company is blocked
if partner.picking_warn != 'block' and partner.parent_id and partner.parent_id.picking_warn == 'block':
partner = partner.parent_id
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
if partner.picking_warn == 'block':
return {'value': {'partner_id': False}, 'warning': warning}
result = super(stock_picking, self).onchange_picking_type(cr, uid, ids, picking_type_id, partner_id, context=context)
if result.get('warning', False):
warning['title'] = title and title + ' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
# FORWARD-PORT UP TO SAAS-10, REMOVE THIS METHOD IN MASTER
def onchange_partner_id(self, cr, uid, ids, partner_id=None, context=None):
return self.onchange_picking_type(cr, uid, ids, False, partner_id, context=context)
class product_product(osv.osv):
_inherit = 'product.template'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sales Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position_id=False, flag=False, warehouse_id=False, context=None):
warning = {}
if not product:
            return {'value': {'product_packaging': False,
                              'product_uom_qty': qty},
                    'domain': {'product_uom': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.sale_line_warn == 'block':
return {'value': {'product_id': False}, 'warning': warning}
        result = super(sale_order_line, self).product_id_change_with_wh(cr, uid, ids, pricelist, product, qty,
            uom, name, partner_id,
            lang, update_tax, date_order, packaging, fiscal_position_id, flag, warehouse_id=warehouse_id, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
if warning:
result['warning'] = warning
return result
@api.onchange('product_id')
def onchange_product_id_warning(self):
if not self.product_id:
return
result = {}
warning = {}
title = False
message = False
product_info = self.product_id
if product_info.sale_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.sale_line_warn == 'block':
return {'warning': warning}
if warning:
result['warning'] = warning
return result
class purchase_order_line(osv.Model):
_inherit = 'purchase.order.line'
@api.onchange('product_id')
def onchange_product_id_warning(self):
if not self.product_id:
return
result = {}
warning = {}
title = False
message = False
product_info = self.product_id
if product_info.purchase_line_warn != 'no-message':
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
if product_info.purchase_line_warn == 'block':
return {'warning': warning}
if warning:
result['warning'] = warning
return result
| agpl-3.0 | -4,465,848,662,373,873,700 | 37.413428 | 224 | 0.579156 | false |
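The four models above repeat a single partner-lookup idiom: fall back to the parent company when the partner itself carries no warning, and escalate to the parent when the parent is set to block. A minimal standalone sketch of that resolution logic (resolve_warning is a hypothetical helper, not part of the module):

def resolve_warning(partner, field):
    # Fall back to the parent company when the partner has no message of its own.
    if getattr(partner, field) == 'no-message' and partner.parent_id:
        partner = partner.parent_id
    if getattr(partner, field) == 'no-message':
        return None
    # Escalate to the blocking parent so a plain warning cannot mask a block.
    if getattr(partner, field) != 'block' and partner.parent_id \
            and getattr(partner.parent_id, field) == 'block':
        partner = partner.parent_id
    return (getattr(partner, field),              # 'warning' or 'block'
            _("Warning for %s") % partner.name,   # dialog title
            getattr(partner, field + '_msg'))     # message text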
sanjeevtripurari/hue | apps/rdbms/setup.py | 37 | 1209 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "rdbms",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "Queries against an RDBMS.",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'rdbms=rdbms' },
)
| apache-2.0 | -5,244,022,926,272,756,000 | 40.689655 | 74 | 0.713813 | false |
jankoslavic/numpy | tools/swig/test/testMatrix.py | 116 | 14309 |
#! /usr/bin/env python
from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0: BadListError = TypeError
else: BadListError = ValueError
import Matrix
######################################################################
class MatrixTestCase(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDet(self):
"Test det function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8, 7], [6, 9]]
self.assertEquals(det(matrix), 30)
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetBadList(self):
"Test det function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8, 7], ["e", "pi"]]
self.assertRaises(BadListError, det, matrix)
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetWrongDim(self):
"Test det function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [8, 7]
self.assertRaises(TypeError, det, matrix)
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetWrongSize(self):
"Test det function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8, 7, 6], [5, 4, 3], [2, 1, 0]]
self.assertRaises(TypeError, det, matrix)
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetNonContainer(self):
"Test det function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
self.assertRaises(TypeError, det, None)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMax(self):
"Test max function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
matrix = [[6, 5, 4], [3, 2, 1]]
self.assertEquals(max(matrix), 6)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxBadList(self):
"Test max function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
matrix = [[6, "five", 4], ["three", 2, "one"]]
self.assertRaises(BadListError, max, matrix)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, 1, 2, 3])
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMin(self):
"Test min function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
matrix = [[9, 8], [7, 6], [5, 4]]
self.assertEquals(min(matrix), 4)
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinBadList(self):
"Test min function with bad list"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
matrix = [["nine", "eight"], ["seven", "six"]]
self.assertRaises(BadListError, min, matrix)
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [1, 3, 5, 7, 9])
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, False)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], self.typeCode)
scale(matrix, 4)
self.assertEquals((matrix == [[4, 8, 12], [8, 4, 8], [12, 8, 4]]).all(), True)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([1, 2, 2, 1], self.typeCode)
self.assertRaises(TypeError, scale, matrix)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1, 2], [2, 1]], self.typeCode)
self.assertRaises(TypeError, scale, matrix)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], 'c')
self.assertRaises(TypeError, scale, matrix)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = [[1, 2, 3], [2, 1, 2], [3, 2, 1]]
self.assertRaises(TypeError, scale, matrix)
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloor(self):
"Test floor function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([[6, 7], [8, 9]], self.typeCode)
floor(matrix, 7)
np.testing.assert_array_equal(matrix, np.array([[7, 7], [8, 9]]))
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorWrongDim(self):
"Test floor function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([6, 7, 8, 9], self.typeCode)
self.assertRaises(TypeError, floor, matrix)
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([[6, 7], [8, 9]], 'c')
self.assertRaises(TypeError, floor, matrix)
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = [[6, 7], [8, 9]]
self.assertRaises(TypeError, floor, matrix)
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeil(self):
"Test ceil function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([[1, 2], [3, 4]], self.typeCode)
ceil(matrix, 3)
np.testing.assert_array_equal(matrix, np.array([[1, 2], [3, 3]]))
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([1, 2, 3, 4], self.typeCode)
self.assertRaises(TypeError, ceil, matrix)
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilWrongType(self):
"Test ceil function with wrong dimensions"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([[1, 2], [3, 4]], 'c')
self.assertRaises(TypeError, ceil, matrix)
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = [[1, 2], [3, 4]]
self.assertRaises(TypeError, ceil, matrix)
# Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Matrix.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
self.assertEquals((lower == [[1, 0, 0], [4, 5, 0], [7, 8, 9]]).all(), True)
self.assertEquals((upper == [[0, 2, 3], [0, 0, 6], [0, 0, 0]]).all(), True)
######################################################################
class scharTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
class ucharTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "uchar"
self.typeCode = "B"
######################################################################
class shortTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
######################################################################
class ushortTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "ushort"
self.typeCode = "H"
######################################################################
class intTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
######################################################################
class uintTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "uint"
self.typeCode = "I"
######################################################################
class longTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
class ulongTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "ulong"
self.typeCode = "L"
######################################################################
class longLongTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "longLong"
self.typeCode = "q"
######################################################################
class ulongLongTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
######################################################################
class floatTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "float"
self.typeCode = "f"
######################################################################
class doubleTestCase(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
######################################################################
if __name__ == "__main__":
# Build the test suite
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite( scharTestCase))
suite.addTest(unittest.makeSuite( ucharTestCase))
suite.addTest(unittest.makeSuite( shortTestCase))
suite.addTest(unittest.makeSuite( ushortTestCase))
suite.addTest(unittest.makeSuite( intTestCase))
suite.addTest(unittest.makeSuite( uintTestCase))
suite.addTest(unittest.makeSuite( longTestCase))
suite.addTest(unittest.makeSuite( ulongTestCase))
suite.addTest(unittest.makeSuite( longLongTestCase))
suite.addTest(unittest.makeSuite(ulongLongTestCase))
suite.addTest(unittest.makeSuite( floatTestCase))
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
print("Testing 2D Functions of Module Matrix")
print("NumPy version", np.__version__)
print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(bool(result.errors + result.failures))
| bsd-3-clause | -104,149,469,292,760,270 | 38.527624 | 86 | 0.557411 | false |
kidburglar/youtube-dl | youtube_dl/extractor/nosvideo.py | 64 | 2480 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
sanitized_Request,
urlencode_postdata,
xpath_text,
xpath_with_ns,
)
_x = lambda p: xpath_with_ns(p, {'xspf': 'http://xspf.org/ns/0/'})
class NosVideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nosvideo\.com/' + \
r'(?:embed/|\?v=)(?P<id>[A-Za-z0-9]{12})/?'
_PLAYLIST_URL = 'http://nosvideo.com/xml/{xml_id:s}.xml'
_FILE_DELETED_REGEX = r'<b>File Not Found</b>'
_TEST = {
'url': 'http://nosvideo.com/?v=mu8fle7g7rpq',
'md5': '6124ed47130d8be3eacae635b071e6b6',
'info_dict': {
'id': 'mu8fle7g7rpq',
'ext': 'mp4',
'title': 'big_buck_bunny_480p_surround-fix.avi.mp4',
'thumbnail': r're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
fields = {
'id': video_id,
'op': 'download1',
'method_free': 'Continue to Video',
}
req = sanitized_Request(url, urlencode_postdata(fields))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id,
'Downloading download page')
if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
raise ExtractorError('Video %s does not exist' % video_id,
expected=True)
xml_id = self._search_regex(r'php\|([^\|]+)\|', webpage, 'XML ID')
playlist_url = self._PLAYLIST_URL.format(xml_id=xml_id)
playlist = self._download_xml(playlist_url, video_id)
track = playlist.find(_x('.//xspf:track'))
if track is None:
raise ExtractorError(
'XML playlist is missing the \'track\' element',
expected=True)
title = xpath_text(track, _x('./xspf:title'), 'title')
url = xpath_text(track, _x('./xspf:file'), 'URL', fatal=True)
thumbnail = xpath_text(track, _x('./xspf:image'), 'thumbnail')
if title is not None:
title = title.strip()
formats = [{
'format_id': 'sd',
'url': url,
}]
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}
| unlicense | -3,327,583,181,065,467,000 | 32.066667 | 75 | 0.523387 | false |
knifenomad/django | tests/template_backends/test_jinja2.py | 315 | 3048 |
# Since this package contains a "jinja2" directory, this is required to
# silence an ImportWarning warning on Python 2.
from __future__ import absolute_import
from unittest import skipIf
from django.template import TemplateSyntaxError
from .test_dummy import TemplateStringsTests
try:
import jinja2
except ImportError:
jinja2 = None
Jinja2 = None
else:
from django.template.backends.jinja2 import Jinja2
@skipIf(jinja2 is None, "this test requires jinja2")
class Jinja2Tests(TemplateStringsTests):
engine_class = Jinja2
backend_name = 'jinja2'
options = {'keep_trailing_newline': True}
def test_origin(self):
template = self.engine.get_template('template_backends/hello.html')
self.assertTrue(template.origin.name.endswith('hello.html'))
self.assertEqual(template.origin.template_name, 'template_backends/hello.html')
def test_origin_from_string(self):
template = self.engine.from_string('Hello!\n')
self.assertEqual(template.origin.name, '<template>')
self.assertEqual(template.origin.template_name, None)
def test_self_context(self):
"""
Using 'self' in the context should not throw errors (#24538).
"""
# self will be overridden to be a TemplateReference, so the self
# variable will not come through. Attempting to use one though should
# not throw an error.
template = self.engine.from_string('hello {{ foo }}!')
content = template.render(context={'self': 'self', 'foo': 'world'})
self.assertEqual(content, 'hello world!')
def test_exception_debug_info_min_context(self):
with self.assertRaises(TemplateSyntaxError) as e:
self.engine.get_template('template_backends/syntax_error.html')
debug = e.exception.template_debug
self.assertEqual(debug['after'], '')
self.assertEqual(debug['before'], '')
self.assertEqual(debug['during'], '{% block %}')
self.assertEqual(debug['bottom'], 1)
self.assertEqual(debug['top'], 0)
self.assertEqual(debug['line'], 1)
self.assertEqual(debug['total'], 1)
self.assertEqual(len(debug['source_lines']), 1)
self.assertTrue(debug['name'].endswith('syntax_error.html'))
self.assertTrue('message' in debug)
def test_exception_debug_info_max_context(self):
with self.assertRaises(TemplateSyntaxError) as e:
self.engine.get_template('template_backends/syntax_error2.html')
debug = e.exception.template_debug
self.assertEqual(debug['after'], '')
self.assertEqual(debug['before'], '')
self.assertEqual(debug['during'], '{% block %}')
self.assertEqual(debug['bottom'], 26)
self.assertEqual(debug['top'], 5)
self.assertEqual(debug['line'], 16)
self.assertEqual(debug['total'], 31)
self.assertEqual(len(debug['source_lines']), 21)
self.assertTrue(debug['name'].endswith('syntax_error2.html'))
self.assertTrue('message' in debug)
| bsd-3-clause | 46,215,310,352,167,190 | 39.105263 | 87 | 0.662402 | false |
anksp21/Community-Zenpacks | ZenPacks.example.Techniques/setup.py | 2 | 2673 |
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.example.Techniques"
VERSION = "1.4.1"
AUTHOR = "Chet Luther"
LICENSE = ""
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.example']
PACKAGES = ['ZenPacks', 'ZenPacks.example', 'ZenPacks.example.Techniques']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ">=2.3"
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# The MANIFEST.in file is the recommended way of including additional files
# in your ZenPack. package_data is another.
#package_data = {}
package_data = {
'':['../COPYRIGHT.txt','../LICENSE.txt'],
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
| gpl-2.0 | -7,889,431,797,147,808,000 | 35.616438 | 79 | 0.685746 | false |
shaunbrady/boto | boto/redshift/exceptions.py | 151 | 8236 |
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class ClusterNotFoundFault(JSONResponseError):
pass
class InvalidClusterSnapshotStateFault(JSONResponseError):
pass
class ClusterSnapshotNotFoundFault(JSONResponseError):
pass
class ClusterSecurityGroupQuotaExceededFault(JSONResponseError):
pass
class ReservedNodeOfferingNotFoundFault(JSONResponseError):
pass
class InvalidSubnet(JSONResponseError):
pass
class ClusterSubnetGroupQuotaExceededFault(JSONResponseError):
pass
class InvalidClusterStateFault(JSONResponseError):
pass
class InvalidClusterParameterGroupStateFault(JSONResponseError):
pass
class ClusterParameterGroupAlreadyExistsFault(JSONResponseError):
pass
class InvalidClusterSecurityGroupStateFault(JSONResponseError):
pass
class InvalidRestoreFault(JSONResponseError):
pass
class AuthorizationNotFoundFault(JSONResponseError):
pass
class ResizeNotFoundFault(JSONResponseError):
pass
class NumberOfNodesQuotaExceededFault(JSONResponseError):
pass
class ClusterSnapshotAlreadyExistsFault(JSONResponseError):
pass
class AuthorizationQuotaExceededFault(JSONResponseError):
pass
class AuthorizationAlreadyExistsFault(JSONResponseError):
pass
class ClusterSnapshotQuotaExceededFault(JSONResponseError):
pass
class ReservedNodeNotFoundFault(JSONResponseError):
pass
class ReservedNodeAlreadyExistsFault(JSONResponseError):
pass
class ClusterSecurityGroupAlreadyExistsFault(JSONResponseError):
pass
class ClusterParameterGroupNotFoundFault(JSONResponseError):
pass
class ReservedNodeQuotaExceededFault(JSONResponseError):
pass
class ClusterQuotaExceededFault(JSONResponseError):
pass
class ClusterSubnetQuotaExceededFault(JSONResponseError):
pass
class UnsupportedOptionFault(JSONResponseError):
pass
class InvalidVPCNetworkStateFault(JSONResponseError):
pass
class ClusterSecurityGroupNotFoundFault(JSONResponseError):
pass
class InvalidClusterSubnetGroupStateFault(JSONResponseError):
pass
class ClusterSubnetGroupAlreadyExistsFault(JSONResponseError):
pass
class NumberOfNodesPerClusterLimitExceededFault(JSONResponseError):
pass
class ClusterSubnetGroupNotFoundFault(JSONResponseError):
pass
class ClusterParameterGroupQuotaExceededFault(JSONResponseError):
pass
class ClusterAlreadyExistsFault(JSONResponseError):
pass
class InsufficientClusterCapacityFault(JSONResponseError):
pass
class InvalidClusterSubnetStateFault(JSONResponseError):
pass
class SubnetAlreadyInUse(JSONResponseError):
pass
class InvalidParameterCombinationFault(JSONResponseError):
pass
class AccessToSnapshotDeniedFault(JSONResponseError):
pass
class UnauthorizedOperationFault(JSONResponseError):
pass
class SnapshotCopyAlreadyDisabled(JSONResponseError):
pass
class ClusterNotFound(JSONResponseError):
pass
class UnknownSnapshotCopyRegion(JSONResponseError):
pass
class InvalidClusterSubnetState(JSONResponseError):
pass
class ReservedNodeQuotaExceeded(JSONResponseError):
pass
class InvalidClusterState(JSONResponseError):
pass
class HsmClientCertificateQuotaExceeded(JSONResponseError):
pass
class SubscriptionCategoryNotFound(JSONResponseError):
pass
class HsmClientCertificateNotFound(JSONResponseError):
pass
class SubscriptionEventIdNotFound(JSONResponseError):
pass
class ClusterSecurityGroupAlreadyExists(JSONResponseError):
pass
class HsmConfigurationAlreadyExists(JSONResponseError):
pass
class NumberOfNodesQuotaExceeded(JSONResponseError):
pass
class ReservedNodeOfferingNotFound(JSONResponseError):
pass
class BucketNotFound(JSONResponseError):
pass
class InsufficientClusterCapacity(JSONResponseError):
pass
class InvalidRestore(JSONResponseError):
pass
class UnauthorizedOperation(JSONResponseError):
pass
class ClusterQuotaExceeded(JSONResponseError):
pass
class InvalidVPCNetworkState(JSONResponseError):
pass
class ClusterSnapshotNotFound(JSONResponseError):
pass
class AuthorizationQuotaExceeded(JSONResponseError):
pass
class InvalidHsmClientCertificateState(JSONResponseError):
pass
class SNSTopicArnNotFound(JSONResponseError):
pass
class ResizeNotFound(JSONResponseError):
pass
class ClusterSubnetGroupNotFound(JSONResponseError):
pass
class SNSNoAuthorization(JSONResponseError):
pass
class ClusterSnapshotQuotaExceeded(JSONResponseError):
pass
class AccessToSnapshotDenied(JSONResponseError):
pass
class InvalidClusterSecurityGroupState(JSONResponseError):
pass
class NumberOfNodesPerClusterLimitExceeded(JSONResponseError):
pass
class ClusterSubnetQuotaExceeded(JSONResponseError):
pass
class SNSInvalidTopic(JSONResponseError):
pass
class ClusterSecurityGroupNotFound(JSONResponseError):
pass
class InvalidElasticIp(JSONResponseError):
pass
class InvalidClusterParameterGroupState(JSONResponseError):
pass
class InvalidHsmConfigurationState(JSONResponseError):
pass
class ClusterAlreadyExists(JSONResponseError):
pass
class HsmConfigurationQuotaExceeded(JSONResponseError):
pass
class ClusterSnapshotAlreadyExists(JSONResponseError):
pass
class SubscriptionSeverityNotFound(JSONResponseError):
pass
class SourceNotFound(JSONResponseError):
pass
class ReservedNodeAlreadyExists(JSONResponseError):
pass
class ClusterSubnetGroupQuotaExceeded(JSONResponseError):
pass
class ClusterParameterGroupNotFound(JSONResponseError):
pass
class InvalidS3BucketName(JSONResponseError):
pass
class InvalidS3KeyPrefix(JSONResponseError):
pass
class SubscriptionAlreadyExist(JSONResponseError):
pass
class HsmConfigurationNotFound(JSONResponseError):
pass
class AuthorizationNotFound(JSONResponseError):
pass
class ClusterSecurityGroupQuotaExceeded(JSONResponseError):
pass
class EventSubscriptionQuotaExceeded(JSONResponseError):
pass
class AuthorizationAlreadyExists(JSONResponseError):
pass
class InvalidClusterSnapshotState(JSONResponseError):
pass
class ClusterParameterGroupQuotaExceeded(JSONResponseError):
pass
class SnapshotCopyDisabled(JSONResponseError):
pass
class ClusterSubnetGroupAlreadyExists(JSONResponseError):
pass
class ReservedNodeNotFound(JSONResponseError):
pass
class HsmClientCertificateAlreadyExists(JSONResponseError):
pass
class InvalidClusterSubnetGroupState(JSONResponseError):
pass
class SubscriptionNotFound(JSONResponseError):
pass
class InsufficientS3BucketPolicy(JSONResponseError):
pass
class ClusterParameterGroupAlreadyExists(JSONResponseError):
pass
class UnsupportedOption(JSONResponseError):
pass
class CopyToRegionDisabled(JSONResponseError):
pass
class SnapshotCopyAlreadyEnabled(JSONResponseError):
pass
class IncompatibleOrderableOptions(JSONResponseError):
pass
class InvalidSubscriptionState(JSONResponseError):
pass
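# Illustrative usage sketch (not part of this module): these classes are
# raised by the Redshift API layer, so callers typically catch them around
# service calls. ``connect_to_region`` and ``describe_clusters`` exist in
# boto 2.x, but the region, the cluster id, and the exact exception class
# raised by your boto version are assumptions here.
#
#   from boto.redshift import connect_to_region
#   from boto.redshift.exceptions import ClusterNotFound
#
#   conn = connect_to_region('us-east-1')
#   try:
#       conn.describe_clusters(cluster_identifier='no-such-cluster')
#   except ClusterNotFound as err:
#       print('cluster is missing: %s' % err)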
| mit | 972,764,550,539,484,300 | 16.943355 | 77 | 0.809131 | false |
android-ia/platform_external_chromium_org | tools/perf/measurements/session_restore.py | 26 | 2907 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from measurements import startup
from metrics import cpu
from metrics import startup_metric
from telemetry.core import util
from telemetry.value import histogram_util
class SessionRestore(startup.Startup):
"""Performs a measurement of Chromium's Session restore performance.
This test is meant to be run against a generated profile.
This test inherits support for the --warm or --cold command line options -
see startup.py for details.
"""
def __init__(self, action_name_to_run = ''):
super(SessionRestore, self).__init__(action_name_to_run=action_name_to_run)
self.close_tabs_before_run = False
self._cpu_metric = None
def CustomizeBrowserOptions(self, options):
super(SessionRestore, self).CustomizeBrowserOptions(options)
histogram_util.CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs([
'--restore-last-session'
])
def TabForPage(self, page, browser):
# Detect that the session restore has completed.
util.WaitFor(lambda: browser.tabs and
histogram_util.GetHistogramCount(
histogram_util.BROWSER_HISTOGRAM,
'SessionRestore.AllTabsLoaded',
browser.foreground_tab),
60)
return browser.foreground_tab
def CanRunForPage(self, page):
# No matter how many pages in the pageset, just perform one test iteration.
return page.page_set.pages.index(page) == 0
def RunNavigateSteps(self, page, tab):
    # Overridden so that no page navigation occurs.
pass
def ValidatePageSet(self, page_set):
wpr_archive_names_to_page_urls = collections.defaultdict(list)
# Construct the map from pages' wpr archive names to pages' urls.
for page in page_set:
if page.is_local:
continue
wpr_archive_name = page_set.WprFilePathForPage(page)
wpr_archive_names_to_page_urls[wpr_archive_name].append(page.url)
# Reject any pageset that contains more than one WPR archive.
if len(wpr_archive_names_to_page_urls.keys()) > 1:
raise Exception("Invalid pageset: more than 1 WPR archive found.: " +
repr(wpr_archive_names_to_page_urls))
def DidStartBrowser(self, browser):
self._cpu_metric = cpu.CpuMetric(browser)
self._cpu_metric.Start(None, None)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForDocumentReadyStateToBeComplete()
# Record CPU usage from browser start to when the foreground page is loaded.
self._cpu_metric.Stop(None, None)
self._cpu_metric.AddResults(tab, results, 'cpu_utilization')
startup_metric.StartupMetric().AddResults(tab, results)
# TODO(jeremy): Measure time to load - first, last and frontmost tab here.
| bsd-3-clause | -9,008,401,439,244,783,000 | 35.797468 | 80 | 0.706914 | false |
pigeonflight/strider-plone | docker/appengine/lib/django-1.4/django/contrib/sessions/backends/signed_cookies.py | 94 | 3019 | try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core import signing
from django.contrib.sessions.backends.base import SessionBase
class PickleSerializer(object):
"""
Simple wrapper around pickle to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
def loads(self, data):
return pickle.loads(data)
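# Minimal round-trip sketch (illustrative only; the salt value is an
# arbitrary example). PickleSerializer plugs into django.core.signing the
# same way SessionStore.load() below uses it:
#
#   from django.core import signing
#
#   token = signing.dumps({'user_id': 1}, serializer=PickleSerializer,
#                         salt='example.salt')
#   data = signing.loads(token, serializer=PickleSerializer,
#                        salt='example.salt')
#   assert data == {'user_id': 1}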
class SessionStore(SessionBase):
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
try:
return signing.loads(self.session_key,
serializer=PickleSerializer,
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies')
except (signing.BadSignature, ValueError):
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return signing.dumps(session_cache, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=PickleSerializer)
| mit | -5,196,731,043,719,268,000 | 31.462366 | 78 | 0.620404 | false |
ShadowKyogre/mypaint | gui/brushiconeditor.py | 3 | 12995 | # -*- encoding: utf-8 -*-
# This file is part of MyPaint.
# Copyright (C) 2009-2013 by Martin Renold <[email protected]>
# Copyright (C) 2013-2016 by the MyPaint Development Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from __future__ import division, print_function
import logging
from gettext import gettext as _
from gi.repository import Gtk
from gi.repository import GLib
import tileddrawwidget
import windowing
import lib.document
from document import CanvasController
from freehand import FreehandMode
import brushmanager
from lib.observable import event
import drawutils
logger = logging.getLogger(__name__)
class BrushIconEditorWindow (windowing.SubWindow):
"""Main app subwindow for editing a brush's icon
See `BrushIconEditor` for details of how this operates.
"""
_TITLE_PREVIEWING = _('Brush Icon')
_TITLE_EDITING = _('Brush Icon (editing)')
def __init__(self):
from application import get_app
app = get_app()
self._app = app
windowing.SubWindow.__init__(self, app)
self._editor = BrushIconEditor()
self._editor.mode_changed += self._editor_mode_changed
self.add(self._editor)
self.set_title(self._TITLE_PREVIEWING)
def _editor_mode_changed(self, editor, editing):
if editing:
self.set_title(self._TITLE_EDITING)
else:
self.set_title(self._TITLE_PREVIEWING)
class BrushIconEditor (Gtk.Grid):
"""Widget for previewing and editing a brush's icon at a large size
The editor has two modes: previewing and editing. In preview mode, the
widget's view of the brush icon just tracks the current brush. When the
Edit button is clicked, the icon is locked for editing and made sensitive,
and the user can switch brushes and colors as necessary to draw a pretty
icon. The Clear, Save and Revert buttons do what you'd expect; saving and
reverting also exit the editing mode.
The name of the brush which will be affected is shown at all times, along
with an indication of the current mode.
"""
## Class constants
_SCALE = 2
_NO_BRUSH_NAME = _("No brush selected")
_ICON_INVALID_TMPL = _(
u'<b>%s</b>\n'
'<small>Select a valid brush first</small>')
_ICON_MODIFIED_TMPL = _(
u'<b>%s</b> <i>(modified)</i>\n'
u'<small>Changes are not yet saved</small>')
_ICON_MODIFIABLE_TMPL = _(
u'<b>%s</b> (editing)\n'
u'<small>Paint with any brush or color</small>')
_ICON_PREVIEWING_TMPL = _(
'<b>%s</b>\n'
u'<small>Click ‘Edit’ to make changes to the icon</small>')
## Construction
def __init__(self):
Gtk.Grid.__init__(self)
self.set_row_spacing(6)
self.set_column_spacing(12)
from application import get_app
app = get_app()
self._app = app
self._bm = app.brushmanager
self.set_border_width(12)
self._bm.brush_selected += self._brush_selected_cb
self._brush_to_edit = None
self._preview_modified = False
self._model = lib.document.Document(self._app.brush,
painting_only=True)
self._model.layer_stack.ensure_populated()
self._model.canvas_area_modified += self._preview_area_modified_cb
self._init_widgets()
@staticmethod
def _make_image_button(text, icon_name, cb):
b = Gtk.Button(text)
i = Gtk.Image()
i.set_from_icon_name(icon_name, Gtk.IconSize.BUTTON)
b.set_image(i)
b.set_image_position(Gtk.PositionType.TOP)
b.connect("clicked", cb)
b.set_can_focus(False)
b.set_can_default(False)
return b
def _init_widgets(self):
# Icon preview and edit TDW
self._tdw = tileddrawwidget.TiledDrawWidget()
self._tdw.set_model(self._model)
self._tdw.set_size_request(
brushmanager.PREVIEW_W * self._SCALE,
brushmanager.PREVIEW_H * self._SCALE
)
self._tdw.scale = 1 # it will be corrected later
self._tdw.scroll_on_allocate = False
self._tdw.pixelize_threshold = 0
tdw_align = Gtk.Alignment(xalign=0.5, yalign=0.0,
xscale=0.0, yscale=0.0)
tdw_align.add(self._tdw)
self.attach(tdw_align, 0, 0, 1, 1)
ctrlr = CanvasController(self._tdw)
ctrlr.init_pointer_events()
ctrlr.modes.default_mode_class = FreehandMode
# Brush name label
lbl = Gtk.Label()
lbl.set_alignment(0.5, 0.0)
lbl.set_justify(Gtk.Justification.CENTER)
lbl_tmpl = self._ICON_PREVIEWING_TMPL
lbl.set_markup(lbl_tmpl % (lib.xml.escape(self._NO_BRUSH_NAME),))
self.attach(lbl, 0, 1, 1, 1)
self.brush_name_label = lbl
# Action buttons
button_box = Gtk.VButtonBox()
button_box.set_homogeneous(False)
button_box.set_layout(Gtk.ButtonBoxStyle.START)
button_box.set_spacing(4)
# TRANSLATORS: begin editing a brush's preview icon
b = self._make_image_button(
_('Edit'), "mypaint-freehand-symbolic", self._edit_cb
)
b.set_tooltip_text(_("Begin editing this preview icon"))
button_box.pack_start(b, False, True, 0)
self._edit_button = b
# TRANSLATORS: revert edits to a brush icon
b = self._make_image_button(
_('Revert'), "mypaint-document-revert-symbolic", self._revert_cb
)
b.set_tooltip_text(_("Discard changes, and cancel editing"))
button_box.pack_start(b, False, True, 0)
button_box.set_child_secondary(b, False)
self._revert_button = b
# TRANSLATORS: clear the brush preview icon being edited
b = self._make_image_button(
_('Clear'), "mypaint-clear-all-symbolic", self._clear_cb
)
b.set_tooltip_text(_("Clear the preview icon"))
button_box.pack_start(b, False, True, 0)
self._clear_button = b
# TRANSLATORS: set the brush icon to a built-in default
b = self._make_image_button(
_('Auto'), "mypaint-document-new-symbolic", self._default_cb
)
b.set_tooltip_text(_("Use the default icon"))
button_box.pack_start(b, False, True, 0)
self._default_button = b
# TRANSLATORS: save edits to a brush icon
b = self._make_image_button(
_('Save'), "mypaint-document-save-symbolic", self._save_cb
)
b.set_tooltip_text(_("Save this preview icon, and finish editing"))
button_box.pack_start(b, False, True, 0)
button_box.set_child_secondary(b, True)
self._save_button = b
self.attach(button_box, 1, 0, 1, 2)
self.connect_after("show", self._show_cb)
mb = self._bm.selected_brush
preview = mb.preview
self._set_preview_pixbuf(preview)
name = mb.name
if name is None:
name = self._NO_BRUSH_NAME
self.brush_name_label.set_markup(lbl_tmpl % (lib.xml.escape(name),))
## Public subscriber interface
@event
def mode_changed(self, editing):
"""Event: called when the mode changes
:param editing: True if the editor is now in edit-mode.
"""
## Event handling
def _show_cb(self, widget):
self._update_widgets()
def _preview_area_modified_cb(self, preview_model, x, y, w, h):
"""Handles changes made to the preview canvas"""
self._preview_modified = True
GLib.idle_add(self._update_widgets)
def _brush_selected_cb(self, bm, managed_brush, brushinfo):
"""Updates the brush icon preview if it is not in edit mode"""
if not self._brush_to_edit:
self._set_preview_pixbuf(managed_brush.preview)
self._update_widgets()
## Button callbacks
def _clear_cb(self, button):
assert self._brush_to_edit
self._tdw.doc.clear_current_layer()
def _default_cb(self, button):
assert self._brush_to_edit
logger.debug("Set preview of %r to a procedural default",
self._brush_to_edit)
preview = drawutils.render_brush_preview_pixbuf(
self._brush_to_edit.get_brushinfo(),
)
self._set_preview_pixbuf(preview)
self.mode_changed(False)
def _edit_cb(self, button):
mb = self._bm.selected_brush
assert not self._brush_to_edit
self._brush_to_edit = mb
logger.debug("Started editing %r", self._brush_to_edit)
self._update_widgets()
self.mode_changed(True)
def _revert_cb(self, button):
assert self._brush_to_edit
logger.debug("Reverted edits to %r", self._brush_to_edit)
preview = self._bm.selected_brush.preview
self._set_preview_pixbuf(preview)
self._brush_to_edit = None
self._update_widgets()
self.mode_changed(False)
def _save_cb(self, button):
pixbuf = self._get_preview_pixbuf()
assert self._brush_to_edit is not None
b = self._brush_to_edit
assert b.name is not None
b.preview = pixbuf
try:
b.save()
except IOError as err:
logger.warning("Failed to save brush: %r (recoverable!)", err)
else:
for brushes in self._bm.groups.itervalues():
if b in brushes:
self._bm.brushes_changed(brushes)
logger.info("Saved %r", b)
self._brush_to_edit = None
self._update_widgets()
self._bm.select_brush(b)
self.mode_changed(False)
return
# Failed to save the icon.
# This can happen if the user deletes a brush whose icon is being
# edited. To recover, add the saved settings as a new brush
logger.info("Failed to save preview, so saving cached settings"
"as a new brush")
b = self._brush_to_edit.clone(name=None)
group = brushmanager.NEW_BRUSH_GROUP
brushes = self._bm.get_group_brushes(group)
brushes.insert(0, b)
b.save()
b.persistent = True
self._bm.brushes_changed(brushes)
self._bm.select_brush(b)
# Reveal the "New" group if needed
ws = self._app.workspace
ws.reveal_tool_widget("MyPaintBrushGroupTool", (group,))
logger.info("Saved %r (full)", b)
self._brush_to_edit = None
self._update_widgets()
self.mode_changed(False)
## Utility methods
def _update_widgets(self):
editing = self._brush_to_edit is not None
if editing:
brush = self._brush_to_edit
else:
brush = self._bm.selected_brush
# Fairly rare: corresponds to no brush being selected on startup
valid = brush.name is not None
# Button states
self._revert_button.set_sensitive(valid and editing)
self._edit_button.set_sensitive(valid and not editing)
self._clear_button.set_sensitive(valid and editing)
self._default_button.set_sensitive(valid and editing)
self._save_button.set_sensitive(valid and editing)
self._model.layer_stack.current.locked = not (valid and editing)
# Text to display in the various states
if not valid:
tmpl = self._ICON_INVALID_TMPL
elif editing:
if self._preview_modified:
tmpl = self._ICON_MODIFIED_TMPL
else:
tmpl = self._ICON_MODIFIABLE_TMPL
else:
tmpl = self._ICON_PREVIEWING_TMPL
# Update edit flag label
name = brush.name
if (not valid) or name is None:
name = self._NO_BRUSH_NAME
markup = tmpl % (lib.xml.escape(name),)
self.brush_name_label.set_markup(markup)
# TDWs now divide their transform's scale by the HiDPI scale
# factor to make 100% zoom match what the screen does. Correct
# for that correction, because the brush icon editor still needs
# the preview to fill the widget's whole area.
scale_factor = self.get_scale_factor()
scale = round(self._SCALE * scale_factor)
if scale != self._tdw.scale:
self._tdw.scale = scale
self._tdw.queue_draw()
def _set_preview_pixbuf(self, pixbuf):
if pixbuf is None:
self._tdw.doc.clear()
else:
self._tdw.doc.load_from_pixbuf(pixbuf)
self._preview_modified = False
def _get_preview_pixbuf(self):
w, h = brushmanager.PREVIEW_W, brushmanager.PREVIEW_H
rootstack = self._tdw.doc.layer_stack
return rootstack.render_as_pixbuf(0, 0, w, h, alpha=False)
| gpl-2.0 | 1,338,019,505,943,729,000 | 34.787879 | 78 | 0.603572 | false |
fangxingli/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/config.py | 8 | 16500 | #!/usr/bin/env python
__author__ = 'rolandh'
import copy
import sys
import os
import re
import logging
import logging.handlers
from importlib import import_module
from saml2 import root_logger, BINDING_URI, SAMLError
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_HTTP_ARTIFACT
from saml2.attribute_converter import ac_factory
from saml2.assertion import Policy
from saml2.mdstore import MetadataStore
from saml2.virtual_org import VirtualOrg
logger = logging.getLogger(__name__)
from saml2 import md
from saml2 import saml
from saml2.extension import mdui
from saml2.extension import idpdisc
from saml2.extension import dri
from saml2.extension import mdattr
from saml2.extension import ui
import xmldsig
import xmlenc
ONTS = {
saml.NAMESPACE: saml,
mdui.NAMESPACE: mdui,
mdattr.NAMESPACE: mdattr,
dri.NAMESPACE: dri,
ui.NAMESPACE: ui,
idpdisc.NAMESPACE: idpdisc,
md.NAMESPACE: md,
xmldsig.NAMESPACE: xmldsig,
xmlenc.NAMESPACE: xmlenc
}
COMMON_ARGS = [
"entityid",
"xmlsec_binary",
"debug",
"key_file",
"key_file_passphrase",
"cert_file",
"encryption_type",
"secret",
"accepted_time_diff",
"name",
"ca_certs",
"description",
"valid_for",
"verify_ssl_cert",
"organization",
"contact_person",
"name_form",
"virtual_organization",
"logger",
"only_use_keys_in_metadata",
"logout_requests_signed",
"disable_ssl_certificate_validation",
"referred_binding",
"session_storage",
"entity_category",
"xmlsec_path",
"extension_schemas",
"cert_handler_extra_class",
"generate_cert_func",
"generate_cert_info",
"verify_encrypt_cert",
"tmp_cert_file",
"tmp_key_file",
"validate_certificate",
"extensions",
"allow_unknown_attributes"
]
SP_ARGS = [
"required_attributes",
"optional_attributes",
"idp",
"aa",
"subject_data",
"want_response_signed",
"want_assertions_signed",
"authn_requests_signed",
"name_form",
"endpoints",
"ui_info",
"discovery_response",
"allow_unsolicited",
"ecp",
"name_id_format",
]
AA_IDP_ARGS = [
"sign_assertion",
"sign_response",
"encrypt_assertion",
"want_authn_requests_signed",
"want_authn_requests_only_with_valid_cert",
"provided_attributes",
"subject_data",
"sp",
"scope",
"endpoints",
"metadata",
"ui_info",
"name_id_format",
"domain",
"name_qualifier",
"edu_person_targeted_id",
]
PDP_ARGS = ["endpoints", "name_form", "name_id_format"]
AQ_ARGS = ["endpoints"]
AA_ARGS = ["attribute", "attribute_profile"]
COMPLEX_ARGS = ["attribute_converters", "metadata", "policy"]
ALL = set(COMMON_ARGS + SP_ARGS + AA_IDP_ARGS + PDP_ARGS + COMPLEX_ARGS +
AA_ARGS)
SPEC = {
"": COMMON_ARGS + COMPLEX_ARGS,
"sp": COMMON_ARGS + COMPLEX_ARGS + SP_ARGS,
"idp": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS,
"aa": COMMON_ARGS + COMPLEX_ARGS + AA_IDP_ARGS + AA_ARGS,
"pdp": COMMON_ARGS + COMPLEX_ARGS + PDP_ARGS,
"aq": COMMON_ARGS + COMPLEX_ARGS + AQ_ARGS,
}
# --------------- Logging stuff ---------------
LOG_LEVEL = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
LOG_HANDLER = {
"rotating": logging.handlers.RotatingFileHandler,
"syslog": logging.handlers.SysLogHandler,
"timerotate": logging.handlers.TimedRotatingFileHandler,
"memory": logging.handlers.MemoryHandler,
}
LOG_FORMAT = "%(asctime)s %(name)s:%(levelname)s %(message)s"
_RPA = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST, BINDING_HTTP_ARTIFACT]
_PRA = [BINDING_HTTP_POST, BINDING_HTTP_REDIRECT, BINDING_HTTP_ARTIFACT]
_SRPA = [BINDING_SOAP, BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,
BINDING_HTTP_ARTIFACT]
PREFERRED_BINDING = {
"single_logout_service": _SRPA,
"manage_name_id_service": _SRPA,
"assertion_consumer_service": _PRA,
"single_sign_on_service": _RPA,
"name_id_mapping_service": [BINDING_SOAP],
"authn_query_service": [BINDING_SOAP],
"attribute_service": [BINDING_SOAP],
"authz_service": [BINDING_SOAP],
"assertion_id_request_service": [BINDING_URI],
"artifact_resolution_service": [BINDING_SOAP],
"attribute_consuming_service": _RPA
}
class ConfigurationError(SAMLError):
pass
# -----------------------------------------------------------------
class Config(object):
def_context = ""
def __init__(self, homedir="."):
self._homedir = homedir
self.entityid = None
self.xmlsec_binary = None
self.xmlsec_path = []
self.debug = False
self.key_file = None
self.key_file_passphrase = None
self.cert_file = None
self.encryption_type = 'both'
self.secret = None
self.accepted_time_diff = None
self.name = None
self.ca_certs = None
self.verify_ssl_cert = False
self.description = None
self.valid_for = None
self.organization = None
self.contact_person = None
self.name_form = None
self.name_id_format = None
self.virtual_organization = None
self.logger = None
self.only_use_keys_in_metadata = True
self.logout_requests_signed = None
self.disable_ssl_certificate_validation = None
self.context = ""
self.attribute_converters = None
self.metadata = None
self.policy = None
self.serves = []
self.vorg = {}
self.preferred_binding = PREFERRED_BINDING
self.domain = ""
self.name_qualifier = ""
self.entity_category = ""
self.crypto_backend = 'xmlsec1'
self.scope = ""
self.allow_unknown_attributes = False
self.allow_unsolicited = False
self.extension_schema = {}
self.cert_handler_extra_class = None
self.verify_encrypt_cert = None
self.generate_cert_func = None
self.generate_cert_info = None
self.tmp_cert_file = None
self.tmp_key_file = None
self.validate_certificate = None
self.extensions = {}
self.attribute = []
self.attribute_profile = []
def setattr(self, context, attr, val):
if context == "":
setattr(self, attr, val)
else:
setattr(self, "_%s_%s" % (context, attr), val)
def getattr(self, attr, context=None):
if context is None:
context = self.context
if context == "":
return getattr(self, attr, None)
else:
return getattr(self, "_%s_%s" % (context, attr), None)
def load_special(self, cnf, typ, metadata_construction=False):
for arg in SPEC[typ]:
try:
self.setattr(typ, arg, cnf[arg])
except KeyError:
pass
self.context = typ
self.load_complex(cnf, typ, metadata_construction=metadata_construction)
self.context = self.def_context
def load_complex(self, cnf, typ="", metadata_construction=False):
try:
self.setattr(typ, "policy", Policy(cnf["policy"]))
except KeyError:
pass
# for srv, spec in cnf["service"].items():
# try:
# self.setattr(srv, "policy",
# Policy(cnf["service"][srv]["policy"]))
# except KeyError:
# pass
try:
try:
acs = ac_factory(cnf["attribute_map_dir"])
except KeyError:
acs = ac_factory()
if not acs:
raise ConfigurationError(
"No attribute converters, something is wrong!!")
_acs = self.getattr("attribute_converters", typ)
if _acs:
_acs.extend(acs)
else:
self.setattr(typ, "attribute_converters", acs)
except KeyError:
pass
if not metadata_construction:
try:
self.setattr(typ, "metadata",
self.load_metadata(cnf["metadata"]))
except KeyError:
pass
def unicode_convert(self, item):
try:
return unicode(item, "utf-8")
except TypeError:
_uc = self.unicode_convert
if isinstance(item, dict):
return dict([(key, _uc(val)) for key, val in item.items()])
elif isinstance(item, list):
return [_uc(v) for v in item]
elif isinstance(item, tuple):
return tuple([_uc(v) for v in item])
else:
return item
def load(self, cnf, metadata_construction=False):
""" The base load method, loads the configuration
:param cnf: The configuration as a dictionary
        :param metadata_construction: whether the configuration is only
            being loaded to construct metadata; if so, some parts can be
            left out.
:return: The Configuration instance
"""
_uc = self.unicode_convert
for arg in COMMON_ARGS:
if arg == "virtual_organization":
if "virtual_organization" in cnf:
for key, val in cnf["virtual_organization"].items():
self.vorg[key] = VirtualOrg(None, key, val)
continue
elif arg == "extension_schemas":
# List of filename of modules representing the schemas
if "extension_schemas" in cnf:
for mod_file in cnf["extension_schemas"]:
_mod = self._load(mod_file)
self.extension_schema[_mod.NAMESPACE] = _mod
try:
setattr(self, arg, _uc(cnf[arg]))
except KeyError:
pass
except TypeError: # Something that can't be a string
setattr(self, arg, cnf[arg])
if "service" in cnf:
for typ in ["aa", "idp", "sp", "pdp", "aq"]:
try:
self.load_special(
cnf["service"][typ], typ,
metadata_construction=metadata_construction)
self.serves.append(typ)
except KeyError:
pass
if "extensions" in cnf:
self.do_extensions(cnf["extensions"])
self.load_complex(cnf, metadata_construction=metadata_construction)
self.context = self.def_context
return self
def _load(self, fil):
head, tail = os.path.split(fil)
if head == "":
if sys.path[0] != ".":
sys.path.insert(0, ".")
else:
sys.path.insert(0, head)
return import_module(tail)
def load_file(self, config_file, metadata_construction=False):
if config_file.endswith(".py"):
config_file = config_file[:-3]
mod = self._load(config_file)
#return self.load(eval(open(config_file).read()))
return self.load(copy.deepcopy(mod.CONFIG), metadata_construction)
def load_metadata(self, metadata_conf):
""" Loads metadata into an internal structure """
acs = self.attribute_converters
if acs is None:
raise ConfigurationError(
"Missing attribute converter specification")
try:
ca_certs = self.ca_certs
except:
ca_certs = None
try:
disable_validation = self.disable_ssl_certificate_validation
except:
disable_validation = False
mds = MetadataStore(
ONTS.values(), acs, self, ca_certs,
disable_ssl_certificate_validation=disable_validation)
mds.imp(metadata_conf)
return mds
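    # Illustrative metadata specification (paths and URLs are placeholders);
    # this is the dict shape that MetadataStore.imp() consumes here:
    #
    #   "metadata": {
    #       "local": ["/etc/saml2/idp_metadata.xml"],
    #       "remote": [{"url": "https://mds.example.org/metadata.xml",
    #                   "cert": "mds_signing.pem"}],
    #   }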
def endpoint(self, service, binding=None, context=None):
""" Goes through the list of endpoint specifications for the
        given type of service and returns a list of endpoints that match
the given binding. If no binding is given all endpoints available for
that service will be returned.
:param service: The service the endpoint should support
:param binding: The expected binding
:return: All the endpoints that matches the given restrictions
"""
spec = []
unspec = []
endps = self.getattr("endpoints", context)
if endps and service in endps:
for endpspec in endps[service]:
try:
endp, bind = endpspec
if binding is None or bind == binding:
spec.append(endp)
except ValueError:
unspec.append(endpspec)
if spec:
return spec
else:
return unspec
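    # Illustrative lookup (assumes a loaded SP configuration; the service
    # name and binding below are two of the keys from PREFERRED_BINDING):
    #
    #   conf = config_factory("sp", "sp_conf.py")  # file name is hypothetical
    #   acs_urls = conf.endpoint("assertion_consumer_service",
    #                            binding=BINDING_HTTP_POST, context="sp")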
def log_handler(self):
try:
_logconf = self.logger
except KeyError:
return None
handler = None
for htyp in LOG_HANDLER:
if htyp in _logconf:
if htyp == "syslog":
args = _logconf[htyp]
if "socktype" in args:
import socket
if args["socktype"] == "dgram":
args["socktype"] = socket.SOCK_DGRAM
elif args["socktype"] == "stream":
args["socktype"] = socket.SOCK_STREAM
else:
raise ConfigurationError("Unknown socktype!")
try:
handler = LOG_HANDLER[htyp](**args)
except TypeError: # difference between 2.6 and 2.7
del args["socktype"]
handler = LOG_HANDLER[htyp](**args)
else:
handler = LOG_HANDLER[htyp](**_logconf[htyp])
break
if handler is None:
            # default is a rotating logger
handler = LOG_HANDLER["rotating"]()
if "format" in _logconf:
formatter = logging.Formatter(_logconf["format"])
else:
formatter = logging.Formatter(LOG_FORMAT)
handler.setFormatter(formatter)
return handler
def setup_logger(self):
if root_logger.level != logging.NOTSET: # Someone got there before me
return root_logger
_logconf = self.logger
if _logconf is None:
return root_logger
try:
root_logger.setLevel(LOG_LEVEL[_logconf["loglevel"].lower()])
except KeyError: # reasonable default
root_logger.setLevel(logging.INFO)
root_logger.addHandler(self.log_handler())
root_logger.info("Logging started")
return root_logger
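    # Illustrative "logger" configuration (file name and sizes are
    # placeholders) showing the keys log_handler()/setup_logger() read; the
    # rotating-handler kwargs are the stdlib RotatingFileHandler arguments:
    #
    #   "logger": {
    #       "rotating": {"filename": "/var/log/saml2.log",
    #                    "maxBytes": 500000, "backupCount": 5},
    #       "loglevel": "debug",
    #       "format": "%(asctime)s %(name)s:%(levelname)s %(message)s",
    #   }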
def endpoint2service(self, endpoint, context=None):
endps = self.getattr("endpoints", context)
for service, specs in endps.items():
for endp, binding in specs:
if endp == endpoint:
return service, binding
return None, None
def do_extensions(self, extensions):
for key, val in extensions.items():
self.extensions[key] = val
class SPConfig(Config):
def_context = "sp"
def __init__(self):
Config.__init__(self)
def vo_conf(self, vo_name):
try:
return self.virtual_organization[vo_name]
except KeyError:
return None
def ecp_endpoint(self, ipaddress):
"""
Returns the entity ID of the IdP which the ECP client should talk to
:param ipaddress: The IP address of the user client
:return: IdP entity ID or None
"""
_ecp = self.getattr("ecp")
if _ecp:
for key, eid in _ecp.items():
if re.match(key, ipaddress):
return eid
return None
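    # Illustrative "ecp" service option (the entity id is a placeholder):
    # keys are regular expressions matched against the client IP address,
    # e.g.
    #
    #   "ecp": {"130\.239\..*": "https://idp.example.org/idp.xml"}
    #
    # so sp_conf.ecp_endpoint("130.239.10.20") would return that entity id.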
class IdPConfig(Config):
def_context = "idp"
def __init__(self):
Config.__init__(self)
def config_factory(typ, filename):
if typ == "sp":
conf = SPConfig().load_file(filename)
conf.context = typ
elif typ in ["aa", "idp", "pdp", "aq"]:
conf = IdPConfig().load_file(filename)
conf.context = typ
else:
conf = Config().load_file(filename)
conf.context = typ
return conf
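# Illustrative use (file names are hypothetical): config_factory loads the
# CONFIG dict from a Python module and returns a context-aware Config:
#
#   sp_conf = config_factory("sp", "sp_conf.py")
#   idp_conf = config_factory("idp", "idp_conf.py")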
| apache-2.0 | 6,093,781,510,750,108,000 | 28.569892 | 80 | 0.56103 | false |
gdw2/zim | tests/tags.py | 1 | 11770 | # -*- coding: utf-8 -*-
# Copyright 2011 Jaap Karssenberg <[email protected]>
import tests
import gtk
import pango
from zim.index import Index, IndexPath, IndexTag
from zim.notebook import Path
from zim.gui.pageindex import FGCOLOR_COL, \
EMPTY_COL, NAME_COL, PATH_COL, STYLE_COL
# Explicitly don't import * from pageindex, make clear what we re-use
from zim.config import ConfigDict
from zim.plugins.tags import *
@tests.slowTest
class TestTaggedPageTreeStore(tests.TestCase):
def setUp(self):
self.storeclass = TaggedPageTreeStore
self.viewclass = TagsPageTreeView
self.notebook = tests.new_notebook()
self.index = self.notebook.index
def runTest(self):
'''Test TaggedPageTreeStore index interface'''
		# This is one big test instead of separate sub tests because in the
# subclass we generate a file based notebook in setUp, and we do not
# want to do that many times.
# Hooking up the treeview as well just to see if we get any errors
		# from the order in which the signals are generated.
ui = MockUI()
ui.notebook = self.notebook
ui.page = Path('Test:foo')
self.assertTrue(self.notebook.get_page(ui.page).exists())
treestore = self.storeclass(self.index)
self.assertEqual(treestore.get_flags(), 0)
self.assertEqual(treestore.get_n_columns(), 8)
treeview = self.viewclass(ui, treestore)
model = treeview.get_model()
if isinstance(model, gtk.TreeModelFilter):
model = model.get_model() # look inside filtered model
self.assertEqual(model, treestore)
self.assertEqual(treestore.get_flags(), 0)
self.assertEqual(treestore.get_n_columns(), 8)
self.index.update(callback=tests.gtk_process_events)
tests.gtk_process_events()
#~ treeview = PageTreeView(None) # just run hidden to check errors
#~ treeview.set_model(treestore)
n = treestore.on_iter_n_children(None)
self.assertTrue(n > 0)
n = treestore.iter_n_children(None)
self.assertTrue(n > 0)
for i in range(treestore.get_n_columns()):
self.assertTrue(not treestore.get_column_type(i) is None)
# Quick check for basic methods
iter = treestore.on_get_iter((0,))
self.assertTrue(isinstance(iter, (PageTreeIter, PageTreeTagIter)))
if self.storeclass is TaggedPageTreeStore:
self.assertTrue(isinstance(iter, PageTreeIter))
self.assertTrue(isinstance(iter.indexpath, IndexPath))
self.assertFalse(iter.indexpath.isroot)
else:
self.assertTrue(isinstance(iter, PageTreeTagIter))
self.assertTrue(isinstance(iter.indextag, IndexTag))
basename = treestore.on_get_value(iter, 0)
self.assertTrue(len(basename) > 0)
self.assertEqual(iter.treepath, (0,))
self.assertEqual(treestore.on_get_path(iter), (0,))
if self.storeclass is TaggedPageTreeStore:
self.assertEqual(treestore.get_treepath(iter.indexpath), (0,))
self.assertEqual(treestore.get_treepath(Path(iter.indexpath.name)), (0,))
else:
self.assertEqual(treestore.get_treepath(iter.indextag), (0,))
iter2 = treestore.on_iter_children(None)
if self.storeclass is TaggedPageTreeStore:
self.assertEqual(iter2.indexpath, iter.indexpath)
else:
self.assertEqual(iter2.indextag, iter.indextag)
self.assertTrue(treestore.on_get_iter((20,20,20,20,20)) is None)
self.assertTrue(treestore.get_treepath(Path('nonexisting')) is None)
self.assertRaises(ValueError, treestore.get_treepath, Path(':'))
# Now walk through the whole tree testing the API
nitems = 0
path = (0,)
prevpath = None
while path:
#~ print 'PATH', path
assert path != prevpath, 'Prevent infinite loop'
nitems += 1
prevpath = path
iter = treestore.get_iter(path)
self.assertEqual(treestore.get_path(iter), tuple(path))
if isinstance(treestore.on_get_iter(path), PageTreeIter):
self._check_indexpath_iter(treestore, iter, path)
else:
self._check_indextag_iter(treestore, iter, path)
# Determine how to continue
if treestore.iter_has_child(iter):
path = path + (0,)
else:
path = path[:-1] + (path[-1]+1,) # increase last member
while path:
try:
treestore.get_iter(path)
except ValueError:
path = path[:-1]
if len(path):
path = path[:-1] + (path[-1]+1,) # increase last member
else:
break
self.assertTrue(nitems > 10) # double check sanity of loop
# Check if all the signals go OK
treestore.disconnect_index()
del treestore
self.index.flush()
treestore = self.storeclass(self.index)
treeview = TagsPageTreeView(ui, treestore)
self.index.update(callback=tests.gtk_process_events)
# Try some TreeView methods
path = Path('Test:foo')
self.assertTrue(treeview.select_page(path))
self.assertEqual(treeview.get_selected_path(), path)
treepath = treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
col = treeview.get_column(0)
treeview.row_activated(treepath, col)
#~ treeview.emit('popup-menu')
treeview.emit('insert-link', path)
treeview.emit('copy')
# Check if all the signals go OK in delete
for page in reversed(list(self.notebook.walk())): # delete bottom up
self.notebook.delete_page(page)
tests.gtk_process_events()
def _check_indexpath_iter(self, treestore, iter, path):
		# Checks specific to nodes that map to an IndexPath object
indexpath = treestore.get_indexpath(iter)
self.assertTrue(path in treestore.get_treepaths(indexpath))
page = self.notebook.get_page(indexpath)
self.assertEqual(treestore.get_value(iter, NAME_COL), page.basename)
self.assertEqual(treestore.get_value(iter, PATH_COL), page)
if page.hascontent or page.haschildren:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), False)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_NORMAL)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.NORMAL_COLOR)
else:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), True)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_ITALIC)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.EMPTY_COLOR)
self._check_iter_children(treestore, iter, path, indexpath.haschildren)
def _check_indextag_iter(self, treestore, iter, path):
		# Checks specific to nodes that map to an IndexTag object
self.assertTrue(treestore.get_indexpath(iter) is None)
indextag = treestore.get_indextag(iter)
self.assertTrue(path in treestore.get_treepaths(indextag))
self.assertEqual(treestore.get_value(iter, NAME_COL), indextag.name)
self.assertEqual(treestore.get_value(iter, PATH_COL), indextag)
if indextag == treestore.untagged:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), True)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_ITALIC)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.EMPTY_COLOR)
else:
self.assertEqual(treestore.get_value(iter, EMPTY_COL), False)
self.assertEqual(treestore.get_value(iter, STYLE_COL), pango.STYLE_NORMAL)
self.assertEqual(treestore.get_value(iter, FGCOLOR_COL), treestore.NORMAL_COLOR)
if indextag == treestore.untagged:
haschildren = self.index.n_list_untagged_root_pages() > 0
else:
haschildren = self.index.n_list_tagged_pages(indextag) > 0
self._check_iter_children(treestore, iter, path, haschildren)
def _check_iter_children(self, treestore, iter, path, haschildren):
# Check API for children is consistent
if haschildren:
self.assertTrue(treestore.iter_has_child(iter))
child = treestore.iter_children(iter)
self.assertTrue(not child is None)
child = treestore.iter_nth_child(iter, 0)
self.assertTrue(not child is None)
parent = treestore.iter_parent(child)
self.assertEqual(treestore.get_path(parent), path)
childpath = treestore.get_path(child)
self.assertEqual(
childpath, tuple(path) + (0,))
n = treestore.iter_n_children(iter)
for i in range(1, n):
child = treestore.iter_next(child)
childpath = treestore.get_path(child)
self.assertEqual(
childpath, tuple(path) + (i,))
child = treestore.iter_next(child)
self.assertTrue(child is None)
else:
self.assertTrue(not treestore.iter_has_child(iter))
child = treestore.iter_children(iter)
self.assertTrue(child is None)
child = treestore.iter_nth_child(iter, 0)
self.assertTrue(child is None)
@tests.slowTest
class TestTagsPageTreeStore(TestTaggedPageTreeStore):
def setUp(self):
TestTaggedPageTreeStore.setUp(self)
self.storeclass = TagsPageTreeStore
self.viewclass = TagsPageTreeView
def runTest(self):
'''Test TagsPageTreeStore index interface'''
TestTaggedPageTreeStore.runTest(self)
@tests.slowTest
class TestTagPluginWidget(tests.TestCase):
def runTest(self):
ui = MockUI()
ui.notebook = tests.new_notebook()
uistate = ConfigDict()
widget = TagsPluginWidget(ui.notebook.index, uistate, ui)
# Excersize all model switches and check we still have a sane state
widget.toggle_treeview()
widget.toggle_treeview()
path = Path('Test:foo')
treepath = widget.treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
widget.disconnect_model()
widget.reload_model()
path = Path('Test:foo')
treepath = widget.treeview.get_model().get_treepath(path)
self.assertTrue(not treepath is None)
# Check signals
#~ widget.treeview.emit('popup-menu')
widget.treeview.emit('insert-link', path)
# Check tag filtering
cloud = widget.tagcloud
self.assertEqual(cloud.get_tag_filter(), None)
tag = None
for button in cloud.get_children():
if button.indextag.name == 'tags':
tag = button.indextag
button.clicked()
break
else:
raise AssertionError, 'No button for @tags ?'
selected, filtered = cloud.get_tag_filter()
self.assertEqual(selected, [tag])
self.assertTrue(len(filtered) > 3)
self.assertTrue(tag in filtered)
self.assertTrue(not widget.treeview._tag_filter is None)
# check filtering in treestore
tagfilter = (selected, filtered)
selected = frozenset(selected)
filtered = frozenset(filtered)
def toplevel(model):
iter = model.get_iter_first()
assert not iter is None
while not iter is None:
yield iter
iter = model.iter_next(iter)
def childiter(model, iter):
iter = model.iter_children(iter)
assert not iter is None
while not iter is None:
yield iter
iter = model.iter_next(iter)
self.assertEqual(uistate['treeview'], 'tagged')
filteredmodel = widget.treeview.get_model()
for iter in toplevel(filteredmodel):
path = filteredmodel.get_indexpath(iter)
self.assertTrue(not path is None)
tags = list(ui.notebook.index.list_tags(path))
tags = frozenset(tags)
self.assertTrue(selected.issubset(tags)) # Needs to contains selected tags
self.assertTrue(tags.issubset(filtered)) # All other tags should be in the filter selection
treepaths = filteredmodel.get_treepaths(path)
self.assertTrue(filteredmodel.get_path(iter) in treepaths)
widget.toggle_treeview()
self.assertEqual(uistate['treeview'], 'tags')
filteredmodel = widget.treeview.get_model()
for iter in toplevel(filteredmodel):
self.assertEqual(filteredmodel.get_indexpath(iter), None)
# toplevel has tags, not pages
tag = filteredmodel[iter][PATH_COL]
self.assertTrue(tag in filtered)
for iter in childiter(filteredmodel, iter):
path = filteredmodel.get_indexpath(iter)
self.assertTrue(not path is None)
tags = list(ui.notebook.index.list_tags(path))
tags = frozenset(tags)
self.assertTrue(selected.issubset(tags)) # Needs to contains selected tags
self.assertTrue(tags.issubset(filtered)) # All other tags should be in the filter selection
treepaths = filteredmodel.get_treepaths(path)
self.assertTrue(filteredmodel.get_path(iter) in treepaths)
class MockUI(tests.MockObject):
page = None
notebook = None
| gpl-2.0 | -3,791,806,458,005,445,000 | 33.314869 | 95 | 0.729227 | false |
pipermerriam/django | tests/gis_tests/gdal_tests/test_geom.py | 256 | 20748 | import json
import unittest
from binascii import b2a_hex
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.six.moves import range
from ..test_data import TestDataMixin
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
GDALException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
        # Should raise GDALException on these invalid inputs
self.assertRaises(GDALException, OGRGeomType, 23)
self.assertRaises(GDALException, OGRGeomType, 'fooD')
self.assertRaises(GDALException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
# self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(GDALException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(GDALException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if it is another
        # collection of the same type, all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
# (end of file; license: bsd-3-clause)

# ===========================================================================
# dbbhattacharya/kitsune :: vendor/packages/sqlalchemy/lib/sqlalchemy/test/requires.py
# ===========================================================================
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
"""
from testing import \
_block_unconditionally as no_support, \
_chain_decorators_on, \
exclude, \
emits_warning_on,\
skip_if,\
fails_on,\
fails_on_everything_except
import testing
import sys
def deferrable_constraints(fn):
"""Target database must support derferable constraints."""
return _chain_decorators_on(
fn,
no_support('firebird', 'not supported by database'),
no_support('mysql', 'not supported by database'),
no_support('mssql', 'not supported by database'),
)
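# A usage sketch (hypothetical feature, not part of the original module)
# showing the pattern every requirement decorator here follows: chain
# backend exclusions onto the decorated test function.
def _example_feature(fn):
    return _chain_decorators_on(
        fn,
        no_support('sqlite', 'illustrative only'),
        no_support('mysql', 'illustrative only'),
    )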
def foreign_keys(fn):
"""Target database must support foreign keys."""
return _chain_decorators_on(
fn,
no_support('sqlite', 'not supported by database'),
)
def unbounded_varchar(fn):
"""Target database must support VARCHAR with no length"""
return _chain_decorators_on(
fn,
no_support('firebird', 'not supported by database'),
no_support('oracle', 'not supported by database'),
no_support('mysql', 'not supported by database'),
)
def boolean_col_expressions(fn):
"""Target database must support boolean expressions as columns"""
return _chain_decorators_on(
fn,
no_support('firebird', 'not supported by database'),
no_support('oracle', 'not supported by database'),
no_support('mssql', 'not supported by database'),
no_support('sybase', 'not supported by database'),
no_support('maxdb', 'FIXME: verify not supported by database'),
)
def identity(fn):
"""Target database must support GENERATED AS IDENTITY or a facsimile.
Includes GENERATED AS IDENTITY, AUTOINCREMENT, AUTO_INCREMENT, or other
column DDL feature that fills in a DB-generated identifier at INSERT-time
without requiring pre-execution of a SEQUENCE or other artifact.
"""
return _chain_decorators_on(
fn,
no_support('firebird', 'not supported by database'),
no_support('oracle', 'not supported by database'),
no_support('postgresql', 'not supported by database'),
no_support('sybase', 'not supported by database'),
)
def independent_cursors(fn):
"""Target must support simultaneous, independent database cursors on a single connection."""
return _chain_decorators_on(
fn,
no_support('mssql+pyodbc', 'no driver support'),
no_support('mssql+mxodbc', 'no driver support'),
)
def independent_connections(fn):
"""Target must support simultaneous, independent database connections."""
# This is also true of some configurations of UnixODBC and probably win32
# ODBC as well.
return _chain_decorators_on(
fn,
no_support('sqlite', 'no driver support'),
exclude('mssql', '<', (9, 0, 0),
'SQL Server 2005+ is required for independent connections'),
)
def row_triggers(fn):
"""Target must support standard statement-running EACH ROW triggers."""
return _chain_decorators_on(
fn,
# no access to same table
no_support('mysql', 'requires SUPER priv'),
exclude('mysql', '<', (5, 0, 10), 'not supported by database'),
# huh? TODO: implement triggers for PG tests, remove this
no_support('postgresql', 'PG triggers need to be implemented for tests'),
)
def correlated_outer_joins(fn):
"""Target must support an outer join to a subquery which correlates to the parent."""
return _chain_decorators_on(
fn,
no_support('oracle', 'Raises "ORA-01799: a column may not be outer-joined to a subquery"')
)
def savepoints(fn):
"""Target database must support savepoints."""
return _chain_decorators_on(
fn,
emits_warning_on('mssql', 'Savepoint support in mssql is experimental and may lead to data loss.'),
no_support('access', 'not supported by database'),
no_support('sqlite', 'not supported by database'),
no_support('sybase', 'FIXME: guessing, needs confirmation'),
exclude('mysql', '<', (5, 0, 3), 'not supported by database'),
)
def denormalized_names(fn):
"""Target database must have 'denormalized', i.e. UPPERCASE as case insensitive names."""
return skip_if(
lambda: not testing.db.dialect.requires_name_normalize,
"Backend does not require denomralized names."
)(fn)
def schemas(fn):
"""Target database must support external schemas, and have one named 'test_schema'."""
return _chain_decorators_on(
fn,
no_support('sqlite', 'no schema support'),
no_support('firebird', 'no schema support')
)
def sequences(fn):
"""Target database must support SEQUENCEs."""
return _chain_decorators_on(
fn,
no_support('access', 'no SEQUENCE support'),
no_support('mssql', 'no SEQUENCE support'),
no_support('mysql', 'no SEQUENCE support'),
no_support('sqlite', 'no SEQUENCE support'),
no_support('sybase', 'no SEQUENCE support'),
)
def update_nowait(fn):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return _chain_decorators_on(
fn,
no_support('access', 'no FOR UPDATE NOWAIT support'),
no_support('firebird', 'no FOR UPDATE NOWAIT support'),
no_support('mssql', 'no FOR UPDATE NOWAIT support'),
no_support('mysql', 'no FOR UPDATE NOWAIT support'),
no_support('sqlite', 'no FOR UPDATE NOWAIT support'),
no_support('sybase', 'no FOR UPDATE NOWAIT support'),
)
def subqueries(fn):
"""Target database must support subqueries."""
return _chain_decorators_on(
fn,
exclude('mysql', '<', (4, 1, 1), 'no subquery support'),
)
def intersect(fn):
"""Target database must support INTERSECT or equivlaent."""
return _chain_decorators_on(
fn,
fails_on('firebird', 'no support for INTERSECT'),
fails_on('mysql', 'no support for INTERSECT'),
fails_on('sybase', 'no support for INTERSECT'),
)
def except_(fn):
"""Target database must support EXCEPT or equivlaent (i.e. MINUS)."""
return _chain_decorators_on(
fn,
fails_on('firebird', 'no support for EXCEPT'),
fails_on('mysql', 'no support for EXCEPT'),
fails_on('sybase', 'no support for EXCEPT'),
)
def offset(fn):
"""Target database must support some method of adding OFFSET or equivalent to a result set."""
return _chain_decorators_on(
fn,
fails_on('sybase', 'no support for OFFSET or equivalent'),
)
def returning(fn):
return _chain_decorators_on(
fn,
no_support('access', 'not supported by database'),
no_support('sqlite', 'not supported by database'),
no_support('mysql', 'not supported by database'),
no_support('maxdb', 'not supported by database'),
no_support('sybase', 'not supported by database'),
no_support('informix', 'not supported by database'),
)
def two_phase_transactions(fn):
"""Target database must support two-phase transactions."""
return _chain_decorators_on(
fn,
no_support('access', 'not supported by database'),
no_support('firebird', 'no SA implementation'),
no_support('maxdb', 'not supported by database'),
no_support('mssql', 'FIXME: guessing, needs confirmation'),
no_support('oracle', 'no SA implementation'),
no_support('sqlite', 'not supported by database'),
no_support('sybase', 'FIXME: guessing, needs confirmation'),
no_support('postgresql+zxjdbc', 'FIXME: JDBC driver confuses the transaction state, may '
'need separate XA implementation'),
exclude('mysql', '<', (5, 0, 3), 'not supported by database'),
)
def unicode_connections(fn):
"""Target driver must support some encoding of Unicode across the wire."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return _chain_decorators_on(
fn,
exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
)
def unicode_ddl(fn):
"""Target driver must support some encoding of Unicode across the wire."""
# TODO: expand to exclude MySQLdb versions w/ broken unicode
return _chain_decorators_on(
fn,
no_support('maxdb', 'database support flakey'),
no_support('oracle', 'FIXME: no support in database?'),
no_support('sybase', 'FIXME: guessing, needs confirmation'),
no_support('mssql+pymssql', 'no FreeTDS support'),
exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'),
)
def sane_rowcount(fn):
return _chain_decorators_on(
fn,
skip_if(lambda: not testing.db.dialect.supports_sane_rowcount)
)
def sane_multi_rowcount(fn):
return _chain_decorators_on(
fn,
skip_if(lambda: not testing.db.dialect.supports_sane_multi_rowcount)
)
def reflects_pk_names(fn):
"""Target driver reflects the name of primary key constraints."""
return _chain_decorators_on(
fn,
fails_on_everything_except('postgresql', 'oracle')
)
def python2(fn):
return _chain_decorators_on(
fn,
skip_if(
lambda: sys.version_info >= (3,),
"Python version 2.xx is required."
)
)
def _has_sqlite():
from sqlalchemy import create_engine
try:
e = create_engine('sqlite://')
return True
except ImportError:
return False
def sqlite(fn):
return _chain_decorators_on(
fn,
skip_if(lambda: not _has_sqlite())
)
# (end of file; license: bsd-3-clause)

# ===========================================================================
# andreparrish/python-for-android :: python3-alpha/python3-src/Lib/test/test_list.py
# ===========================================================================
import sys
from test import support, list_tests
class ListTest(list_tests.CommonTest):
type2test = list
def test_basic(self):
self.assertEqual(list([]), [])
l0_3 = [0, 1, 2, 3]
l0_3_bis = list(l0_3)
self.assertEqual(l0_3, l0_3_bis)
self.assertTrue(l0_3 is not l0_3_bis)
self.assertEqual(list(()), [])
self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
self.assertEqual(list(''), [])
self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
if sys.maxsize == 0x7fffffff:
# This test can currently only work on 32-bit machines.
# XXX If/when PySequence_Length() returns a ssize_t, it should be
# XXX re-enabled.
# Verify clearing of bug #556025.
# This assumes that the max data size (sys.maxint) == max
# address size this also assumes that the address size is at
# least 4 bytes with 8 byte addresses, the bug is not well
# tested
#
# Note: This test is expected to SEGV under Cygwin 1.3.12 or
# earlier due to a newlib bug. See the following mailing list
# thread for the details:
# http://sources.redhat.com/ml/newlib/2002/msg00369.html
self.assertRaises(MemoryError, list, range(sys.maxsize // 2))
# This code used to segfault in Py2.4a3
x = []
x.extend(-y for y in x)
self.assertEqual(x, [])
def test_truth(self):
super().test_truth()
self.assertTrue(not [])
self.assertTrue([42])
def test_identity(self):
self.assertTrue([] is not [])
def test_len(self):
super().test_len()
self.assertEqual(len([]), 0)
self.assertEqual(len([0]), 1)
self.assertEqual(len([0, 1, 2]), 3)
def test_overflow(self):
lst = [4, 5, 6, 7]
n = int((sys.maxsize*2+2) // len(lst))
def mul(a, b): return a * b
def imul(a, b): a *= b
self.assertRaises((MemoryError, OverflowError), mul, lst, n)
self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
support.run_unittest(ListTest)
# verify reference counting
import sys
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(ListTest)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
# (end of file; license: apache-2.0)

# ===========================================================================
# google/grr :: grr/core/grr_response_core/lib/registry.py
# ===========================================================================
#!/usr/bin/env python
"""This is the GRR class registry.
A central place responsible for registering plugins. Any class can have plugins
if it defines __metaclass__ = MetaclassRegistry. Any class derived from such a
base class is recorded in a `classes` dict shared with the base class, keyed by
class name with the class itself as the value.
"""
# The following are abstract base classes
import abc
# Metaclasses confuse the linter so: pylint: disable=no-value-for-parameter
class MetaclassRegistry(abc.ABCMeta):
"""Automatic Plugin Registration through metaclasses."""
def IsAbstract(cls):
# Abstract classes should not be registered. We define classes as abstract
# by giving them the __abstract attribute (this is not inheritable) or by
# naming them Abstract<ClassName>.
abstract_attribute = "_%s__abstract" % cls.__name__
return (cls.__name__.startswith("Abstract") or
hasattr(cls, abstract_attribute))
def __init__(cls, name, bases, env_dict):
abc.ABCMeta.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
# Attach the classes dict to the baseclass and have all derived classes
# use the same one:
for base in bases:
try:
cls.classes = base.classes
cls.classes_by_name = base.classes_by_name
cls.plugin_feature = base.plugin_feature
cls.top_level_class = base.top_level_class
break
except AttributeError:
pass
try:
if cls.classes and cls.__name__ in cls.classes:
raise RuntimeError("Duplicate names for registered classes: %s, %s" %
(cls, cls.classes[cls.__name__]))
cls.classes[cls.__name__] = cls
cls.classes_by_name[getattr(cls, "name", None)] = cls
except AttributeError:
cls.classes = {cls.__name__: cls}
cls.classes_by_name = {getattr(cls, "name", None): cls}
cls.plugin_feature = cls.__name__
# Keep a reference to the top level class
cls.top_level_class = cls
else:
# Abstract classes should still have all the metadata attributes
# registered.
for base in bases:
try:
cls.classes = base.classes
cls.classes_by_name = base.classes_by_name
break
except AttributeError:
pass
if not hasattr(cls, "classes"):
cls.classes = {}
if not hasattr(cls, "classes_by_name"):
cls.classes_by_name = {}
def GetPlugin(cls, name):
"""Return the class of the implementation that carries that name.
Args:
name: The name of the plugin to return.
Raises:
KeyError: If the plugin does not exist.
Returns:
A the registered class referred to by the name.
"""
return cls.classes[name]
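# Illustrative sketch (hypothetical classes, not part of GRR) of how a
# hierarchy opts into registration and how plugins are looked up later.
def _registry_example():
  class AbstractRenderer(object):
    """Not registered: the 'Abstract' name prefix marks the class abstract."""
    __metaclass__ = MetaclassRegistry

  class JSONRenderer(AbstractRenderer):
    """Registered under its class name in the shared `classes` dict."""

  # Base and subclass share one registry, so lookup works from either.
  return AbstractRenderer.GetPlugin("JSONRenderer") is JSONRenderer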
class EventRegistry(MetaclassRegistry):
"""Event registry."""
EVENT_NAME_MAP = {}
EVENTS = []
def __init__(cls, name, bases, env_dict):
MetaclassRegistry.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
# Register ourselves as listeners for the events in cls.EVENTS.
for ev in cls.EVENTS:
EventRegistry.EVENT_NAME_MAP.setdefault(ev, set()).add(cls)
class AFF4FlowRegistry(MetaclassRegistry):
"""A dedicated registry that only contains flows."""
FLOW_REGISTRY = {}
def __init__(cls, name, bases, env_dict):
MetaclassRegistry.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
cls.FLOW_REGISTRY[name] = cls
@classmethod
def FlowClassByName(mcs, flow_name):
flow_cls = mcs.FLOW_REGISTRY.get(flow_name)
if flow_cls is None:
raise ValueError("Flow '%s' not known." % flow_name)
return flow_cls
class FlowRegistry(MetaclassRegistry):
"""A dedicated registry that only contains new style flows."""
FLOW_REGISTRY = {}
def __init__(cls, name, bases, env_dict):
MetaclassRegistry.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
cls.FLOW_REGISTRY[name] = cls
@classmethod
def FlowClassByName(mcs, flow_name):
flow_cls = mcs.FLOW_REGISTRY.get(flow_name)
if flow_cls is None:
raise ValueError("Flow '%s' not known." % flow_name)
return flow_cls
class CronJobRegistry(MetaclassRegistry):
"""A dedicated registry that only contains cron jobs."""
CRON_REGISTRY = {}
def __init__(cls, name, bases, env_dict):
MetaclassRegistry.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
cls.CRON_REGISTRY[name] = cls
@classmethod
def CronJobClassByName(mcs, job_name):
job_cls = mcs.CRON_REGISTRY.get(job_name)
if job_cls is None:
raise ValueError("CronJob '%s' not known." % job_name)
return job_cls
class SystemCronJobRegistry(CronJobRegistry):
"""A dedicated registry that only contains cron jobs."""
SYSTEM_CRON_REGISTRY = {}
def __init__(cls, name, bases, env_dict):
super(SystemCronJobRegistry, cls).__init__(name, bases, env_dict)
if not cls.IsAbstract():
cls.SYSTEM_CRON_REGISTRY[name] = cls
@classmethod
def CronJobClassByName(mcs, job_name):
job_cls = mcs.SYSTEM_CRON_REGISTRY.get(job_name)
if job_cls is None:
raise ValueError("CronJob '%s' not known." % job_name)
return job_cls
class OutputPluginRegistry(MetaclassRegistry):
"""A dedicated registry that only contains output plugins."""
PLUGIN_REGISTRY = {}
def __init__(cls, name, bases, env_dict):
MetaclassRegistry.__init__(cls, name, bases, env_dict)
if not cls.IsAbstract():
cls.PLUGIN_REGISTRY[name] = cls
@classmethod
def PluginClassByName(mcs, plugin_name):
return mcs.PLUGIN_REGISTRY.get(plugin_name)
# (end of file; license: apache-2.0)

# ===========================================================================
# rwgdrummer/maskgen :: maskgen/software_loader.py
# ===========================================================================
# =============================================================================
# Authors: PAR Government
# Organization: DARPA
#
# Copyright (c) 2016 PAR Government
# All rights reserved.
# ==============================================================================
import json
import logging
import os
from json import JSONEncoder
from maskgen.config import global_config
from maskgen_loader import MaskGenLoader
from maskgen.support import getValue
class OperationEncoder(JSONEncoder):
def default(self, o):
return o.__dict__
def strip_version(version):
return '.'.join(version.split('.')[:2]) if version is not None else ''
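# For example: strip_version('2.8.14') -> '2.8'; strip_version(None) -> ''.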
def getFileName(fileName, path=None):
import sys
if (os.path.exists(fileName)):
logging.getLogger('maskgen').info( 'Loading ' + fileName)
return fileName
places = [os.getenv('MASKGEN_RESOURCES', 'resources')]
places.extend([os.path.join(x,'resources') for x in sys.path if 'maskgen' in x or
(path is not None and path in x)])
for place in places:
        newName = os.path.abspath(os.path.join(place, fileName))
        if os.path.exists(newName):
            logging.getLogger('maskgen').info('Loading ' + newName)
            return newName
def extract_default_values(operation_arguments):
"""
given argument definitions, return operation name: default value if default is present
:param operation_arguments:
:return:
@type dict
"""
return {k:v['defaultvalue'] for k,v in operation_arguments.iteritems() if 'defaultvalue' in v}
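# For example (illustrative argument definitions): entries carrying a
# 'defaultvalue' key are kept and all others are dropped, so
#   extract_default_values({'angle': {'defaultvalue': 90}, 'mode': {}})
# returns {'angle': 90}.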
class ProjectProperty:
description = None
name = None
type = None
operations = None
parameter = None
rule = None
values = None
value = None
information = None
semanticgroup = False
node = False
readonly = False
mandatory= False
nodetype = None
defaultvalue = None
"""
@type operations: list of str
@type nodetype: str
"""
def __init__(self, name='', type='', operations=None, parameter=None, description=None,
information=None, value=None, values=None, rule=None, node=False, readonly=False,mandatory=True,
nodetype=None,semanticgroup=False,defaultvalue = None,includedonors=False):
self.name = name
self.type = type
self.operations = operations
self.parameter = parameter
self.description = description
self.rule = rule
self.values = values
self.value = value
self.information = information
self.node = node
self.readonly = readonly
self.mandatory = mandatory
self.nodetype = nodetype
self.semanticgroup = semanticgroup
self.defaultvalue = defaultvalue
self.includedonors = includedonors
class Operation:
name = None
category = None
includeInMask = {'default':False}
description = None
optionalparameters = {}
mandatoryparameters = {}
rules = []
analysisOperations = []
transitions = []
compareparameters = {}
generateMask = "all"
groupedOperations = None
groupedCategories = None
maskTransformFunction = None
compareOperations = None
parameter_dependencies = None
donor_processor = None
"""
    parameter_dependencies is a dictionary: { 'parameter name' : { 'parameter value' : 'dependent parameter name'}}
    If the parameter identified by 'parameter name' has the value 'parameter value', then the parameter identified by
    'dependent parameter name' is required.
    compareparameters are used to pick arguments and algorithms for link comparison and analysis functions.
Examples:
"function" :"maskgen.tool_set.cropCompare",
"video_function": "maskgen.video_tools.cropCompare"
"tolerance" : 0.0001
maskTransformFunction is a dictionary of functions associated with type of media which determines the
transformation function applied to a mask as it is re-alligned to the final or base image for composite or
donor mask construction, respectively. Examples:
"image": "maskgen.mask_rules.crop_transform",
"video":"maskgen.mask_rules.video_crop_transform"
rules is a list of functions to apply to each link during validation. The signature of each of function
is (op, graph, frm, to)
op = Operation
graph = maskgen.image_graph.ImageGraph
frm = str source node id
to = str targe node id
transitions is a list of string of the format source type '.' target type.
The types identify media types (e.g. audio, video ,zip and image). The transition identifies
allowed transitions supported by the specific operation. For example, 'video.image' states that the
associated operation can convert a video to an image.
generateMask states whether an operation analysis requires mask generation for 'all', 'frames', 'meta' or None.
    For the moment, 'all' and 'frames' are the same thing: frames and metadata are collected for each link comparing source
and target media. generateMask currently only applies to video and audio.
analysisOperations is a list of function names that are used to populate the analysis dictionary collected at link
comparison time. Analysis can find transform matrices, shape changes, location identification, etc.
The results of analysis are often used by maskTransformFunction functions to construct composite and donor masks,
acting as the transform parameters.
groupedOperations and groupedCategories are lists of operations and categories represented by an agglomerative/composite
operation.
@type category: str
    @type generateMask: str
@type name: str
@type rules: list
@type transitions : list
@type description: str
@type analysisOperations: list
@type mandatoryparameters: dict
@type optionalparameters: dict
@type compareparameters: dict
@type parameter_dependencies: dict
@type maskTransformFunction:dict
@type donor_processor: str
"""
def __init__(self, name='', category='', includeInMask={"default": False}, rules=list(), optionalparameters=dict(),
mandatoryparameters=dict(), description=None, analysisOperations=list(), transitions=list(),
compareparameters=dict(),generateMask = "all",groupedOperations=None, groupedCategories = None,
maskTransformFunction=None,parameter_dependencies = None, qaList=None,donor_processor=None):
self.name = name
self.category = category
self.includeInMask = includeInMask
self.rules = rules
self.mandatoryparameters = mandatoryparameters if mandatoryparameters is not None else {}
self.optionalparameters = optionalparameters if optionalparameters is not None else {}
self.description = description
self.analysisOperations = analysisOperations
self.transitions = transitions
self.compareparameters = compareparameters
self.generateMask = generateMask
self.groupedOperations = groupedOperations
self.groupedCategories = groupedCategories
self.maskTransformFunction = maskTransformFunction
self.parameter_dependencies = parameter_dependencies
self.qaList = qaList
self.donor_processor = donor_processor
self.trigger_arguments = self._getTriggerUpdateArguments()
def _getTriggerUpdateArguments(self):
names = set()
for k,v in self.mandatoryparameters.iteritems():
if getValue(v,'trigger mask',False):
names.add(k)
for k,v in self.optionalparameters.iteritems():
if getValue(v,'trigger mask',False):
names.add(k)
return names
def getTriggerUpdateArguments(self):
return self.trigger_arguments
def recordMaskInComposite(self,filetype):
        if filetype in self.includeInMask:
            return 'yes' if self.includeInMask[filetype] else 'no'
        if 'default' in self.includeInMask:
            return 'yes' if self.includeInMask['default'] else 'no'
return 'no'
def getParameterValuesForType(self, param_name, ftype, default_value=[]):
param = getValue(self.mandatoryparameters, param_name, getValue(self.optionalparameters, param_name,
{}))
return getValue(param, ftype + ':values', getValue(param, 'values', default_value), default_value)
def getDonorProcessor(self, default_processor = None):
if self.donor_processor is not None:
return getRule(self.donor_processor)
return getRule(default_processor)
def getConvertFunction(self):
if 'convert_function' in self.compareparameters:
funcName = self.compareparameters['convert_function']
return getRule(funcName)
return None
def getCompareFunction(self):
if 'function' in self.compareparameters:
funcName = self.compareparameters['function']
return getRule(funcName)
return None
def getVideoCompareFunction(self):
if 'video_function' in self.compareparameters:
funcName = self.compareparameters['video_function']
return getRule(funcName)
return None
def to_JSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
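# A minimal sketch (hypothetical operation, not taken from operations.json)
# of how the fields documented above fit together when an Operation is
# built; the parameter schema shown is illustrative only.
def _example_operation():
    return Operation(name='ExampleCrop',
                     category='Transform',
                     transitions=['image.image'],
                     includeInMask={'default': False},
                     mandatoryparameters={'pixel width': {'type': 'int[0:100000]',
                                                          'description': 'crop width'}},
                     maskTransformFunction={'image': 'maskgen.mask_rules.crop_transform'},
                     generateMask='all')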
def getOperation(name, fake = False, warning=True):
"""
:param name: name of the operation
:param fake: Set to True to allow fake operations
:return: Operation
"""
if name == 'Donor':
return Operation(name='Donor', category='Donor',maskTransformFunction=
{'image':'maskgen.mask_rules.donor',
'video':'maskgen.mask_rules.video_donor',
'audio': 'maskgen.mask_rules.audio_donor',
})
if name not in getMetDataLoader().operations:
root_name = name.split('::')[0]
if root_name == name:
if warning:
logging.getLogger('maskgen').warning( 'Requested missing operation ' + str(name))
else:
return getOperation(root_name,fake=fake,warning=warning)
return getMetDataLoader().operations[name] if name in getMetDataLoader().operations else (Operation(name='name', category='Bad') if fake else None)
def getOperations():
return getMetDataLoader().operations
def getOperationsByCategory(sourcetype, targettype):
result = {}
transition = sourcetype + '.' + targettype
for name, op in getMetDataLoader().operations.iteritems():
if transition in op.transitions:
if op.category not in result:
result[op.category] = []
result[op.category].append(op.name)
return result
def getPropertiesBySourceType(source):
return getMetDataLoader().node_properties[source]
def getSoftwareSet():
return getMetDataLoader().software_set
def saveJSON(filename):
opnamelist = list(getMetDataLoader().operations.keys())
opnamelist.sort()
oplist = [getMetDataLoader().operations[op] for op in opnamelist]
with open(filename, 'w') as f:
json.dump({'operations': oplist}, f, indent=2, cls=OperationEncoder)
def loadProjectPropertyJSON(fileName):
"""
:param fileName:
:return:
@rtype: list of ProjectProperty
"""
res = list()
fileName = getFileName(fileName)
with open(fileName, 'r') as f:
props = json.load(f)
for prop in props['properties']:
res.append( ProjectProperty(name=prop['name'], type=prop['type'], description=prop['description'],
parameter=prop['parameter'] if 'parameter' in prop else None,
rule=prop['rule'] if 'rule' in prop else None,
values=prop['values'] if 'values' in prop else None,
value=prop['value'] if 'value' in prop else None,
node=prop['node'] if 'node' in prop else False,
information=prop['information'] if 'information' in prop else None,
operations=[prop['operation']] if 'operation' in prop else
(prop['operations'] if 'operations' in prop else []),
readonly=prop['readonly'] if 'readonly' in prop else None,
mandatory=prop['mandatory'] if 'mandatory' in prop else False,
semanticgroup=prop['semanticgroup'] if 'semanticgroup' in prop else False,
nodetype=prop['nodetype'] if 'nodetype' in prop else None,
defaultvalue=prop['defaultvalue'] if 'defaultvalue' in prop else None,
includedonors=prop['includedonors'] if 'includedonors' in prop else False))
return res
def loadOperationJSON(fileName):
"""
:param fileName:
:return:
@rtype: dict of str:Operation
"""
from collections import OrderedDict
operations = OrderedDict()
fileName = getFileName(fileName)
with open(fileName, 'r') as f:
ops = json.load(f)
for op in ops['operations']:
operations[op['name']] = Operation(name=op['name'], category=op['category'], includeInMask=op['includeInMask'],
rules=op['rules'], optionalparameters=op['optionalparameters'] if 'optionalparameters' in op else {},
mandatoryparameters=op['mandatoryparameters'],
description=op['description'] if 'description' in op else None,
generateMask=op['generateMask'] if 'generateMask' in op else "all",
analysisOperations=op[
'analysisOperations'] if 'analysisOperations' in op else [],
transitions=op['transitions'] if 'transitions' in op else [],
compareparameters=op[
'compareparameters'] if 'compareparameters' in op else dict(),
maskTransformFunction=op['maskTransformFunction'] if 'maskTransformFunction' in op else None,
parameter_dependencies=op['parameter_dependencies'] if 'parameter_dependencies' in op else None,
qaList=op['qaList'] if 'qaList' in op else None,
donor_processor=op['donor_processor'] if 'donor_processor' in op else None)
return operations, ops['filtergroups'] if 'filtergroups' in ops else {}, ops['version'] if 'version' in ops else '0.4.0308.db2133eadc', \
ops['node_properties'] if 'node_properties' in ops else {}
customRuleFunc = {}
def loadCustomRules():
global customRuleFunc
import pkg_resources
for p in pkg_resources.iter_entry_points("maskgen_rules"):
logging.getLogger('maskgen').info( 'load rule ' + p.name)
customRuleFunc[p.name] = p.load()
def insertCustomRule(name,func):
global customRuleFunc
customRuleFunc[name] = func
def returnNoneFunction(*arg,**kwargs):
return None
def getRule(name, globals={}, noopRule=returnNoneFunction, default_module=None):
if name is None:
return noopRule
import importlib
global customRuleFunc
if name in customRuleFunc:
return customRuleFunc[name]
else:
if '.' not in name:
mod_name = default_module
func_name = name
func = globals.get(name)
if func is None:
if default_module is None:
logging.getLogger('maskgen').error('Rule Function {} not found'.format(name))
return noopRule
else:
return func
else:
mod_name, func_name = name.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
customRuleFunc[name] = func
            return func  # globals.get(name)
except Exception as e:
logging.getLogger('maskgen').error('Unable to load rule {}: {}'.format(name,str(e)))
return noopRule
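# Usage sketch: getRule resolves a dotted name to a callable, caching it in
# customRuleFunc; on failure it logs the error and returns noopRule. For
# example, getRule('maskgen.software_loader.returnNoneFunction') yields the
# no-op function defined above.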
def getProjectProperties():
"""
:return:
@rtype: list of ProjectProperty
"""
return getMetDataLoader().projectProperties
def getSemanticGroups():
return [prop.description for prop in getProjectProperties() if prop.semanticgroup]
def getFilters(filtertype):
if filtertype == 'filtergroups':
return getMetDataLoader().filters
else:
return {}
def _load_software_from_resource(fileName):
fileName = getFileName(fileName)
software_set = {'image': {}, 'video': {}, 'audio': {},'zip': {}, 'collection':{}}
category_set = {'gan': [], 'other': []}
with open(fileName) as f:
line_no = 0
for l in f.readlines():
line_no += 1
l = l.strip()
if len(l) == 0:
continue
columns = l.split(',')
if len(columns) < 3:
logging.getLogger('maskgen').error(
'Invalid software description on line ' + str(line_no) + ': ' + l)
software_type = columns[0].strip()
software_name = columns[2].strip()
software_category = columns[1].strip().lower()
versions = [strip_version(x.strip()) for x in columns[3:] if len(x) > 0]
if software_type not in ['both', 'image', 'video', 'audio', 'all', 'collection']:
logging.getLogger('maskgen').error('Invalid software type on line ' + str(line_no) + ': ' + l)
elif len(software_name) > 0:
types = ['image', 'video', 'zip'] if software_type == 'both' else [software_type]
types = ['image', 'video', 'audio', 'zip'] if software_type == 'all' else types
types = ['video', 'audio'] if software_type == 'audio' else types
types = ['zip'] if software_type == 'zip' else types
types = ['collection'] if software_type == 'collection' else types
for stype in types:
software_set[stype][software_name] = versions
category_set[software_category].append(software_name)
return {'software_set': software_set, 'category_set': category_set}
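# Expected CSV layout (illustrative rows, not from the shipped software.csv):
# column 1 is the media type, column 2 the category ('gan' or 'other'),
# column 3 the software name, and any remaining columns are versions.
#
#   image,other,GIMP,2.8,2.10
#   all,other,ffmpeg,3.4,4.0
#   video,gan,FaceSwapTool,1.0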
class MetaDataLoader:
version = ''
software_set = {}
software_category_set = {}
operations = {}
filters = {}
operationsByCategory = {}
node_properties = {}
def __init__(self):
self.reload()
def reload(self):
self.operations, self.filters, self.operationsByCategory, self.node_properties, self.operation_version = self._load_operations('operations.json')
self.software_set, self.software_category_set = self._load_software('software.csv')
self.projectProperties = self._load_project_properties('project_properties.json')
self.manipulator_names = self._load_manipulators('ManipulatorCodeNames.txt')
def _load_software(self, fileName):
sets = _load_software_from_resource(fileName)
softwareset = sets['software_set']
categoryset = sets['category_set']
return softwareset, categoryset
def merge(self,fileName):
softwareset = _load_software_from_resource(fileName)['software_set']
bytesOne = {}
bytesTwo = {}
namesOne = {}
namesTwo = {}
for atype,names in self.software_set.iteritems():
for name in names:
bytesOne[name] = atype
for name,versions in names.iteritems():
namesOne[name] = versions
for atype,names in softwareset.iteritems():
for name in names:
bytesTwo[name] = atype
for name,versions in names.iteritems():
namesTwo[name] = versions
for name,versions in namesTwo.iteritems():
if name not in namesOne:
logging.getLogger('maskgen').warn( 'missing ' + name)
else:
for version in versions:
if version not in namesOne[name]:
logging.getLogger('maskgen').warn( 'missing ' + str(version) + ' in ' + name)
for name, atype in bytesTwo.iteritems():
if name in bytesOne and atype != bytesOne[name]:
logging.getLogger('maskgen').warn( 'missing ' + str(atype) + ' in ' + name)
def _load_manipulators(self, filename):
file = getFileName(filename)
if file is not None:
if os.path.exists(file):
with open(file, 'r') as fp:
return [name.strip() for name in fp.readlines() if len(name) > 1]
def _load_project_properties(self, fileName):
"""
:param fileName:
:return:
@rtype: list ProjectProperty
"""
loadCustomRules()
projectProperties = loadProjectPropertyJSON(fileName)
return projectProperties
def _load_operations(self, fileName):
operations, filters, version, node_properties = loadOperationJSON(fileName)
logging.getLogger('maskgen').info('Loaded operation version ' + version)
operationsByCategory = {}
for op, data in operations.iteritems():
category = data.category
if category not in operationsByCategory:
operationsByCategory[category] = []
operationsByCategory[category].append(op)
return operations, filters, operationsByCategory, node_properties, version
def propertiesToCSV(self, filename):
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename, 'w') as fp:
fp_writer = csv.writer(fp)
fp_writer.writerow(['JSON Name', 'Full Name', 'level', 'description', 'type', 'operations'])
for property in self.projectProperties:
opdata = [
property.name,
property.description,
'semantic group' if property.semanticgroup else 'node' if property.node else 'project',
property.information,
property.type,
' '.join(property.operations) if property.operations is not None else ''
]
try:
fp_writer.writerow(opdata)
except:
print ' '.join(opdata)
def operationsToCSV(self,filename):
import csv
csv.register_dialect('unixpwd', delimiter=',', quoting=csv.QUOTE_MINIMAL)
with open(filename,'w') as fp:
fp_writer = csv.writer(fp)
fp_writer.writerow(['category','operation','description','transitions','argument1','argument1 description'])
for cat, ops in self.operationsByCategory.iteritems():
for opname in ops:
op = self.operations[opname]
opdata = [
cat,
op.name,
op.description,
' '.join(op.transitions),
]
for name, val in op.mandatoryparameters.iteritems():
opdata.extend([name, val['description']])
for name, val in op.optionalparameters.iteritems():
opdata.extend([name, val['description']])
try:
fp_writer.writerow(opdata)
except:
print ' '.join(opdata)
def getProperty(self, propertyname):
for prop in self.projectProperties:
if propertyname == prop.name:
return prop
def getProjectProperty(name, prop_type):
"""
:param name: name of property
:param prop_type: one of 'semanticgroup' or 'node' or 'project'
:return: ProjectProperty
@type name: str
@type prop_type: str
@rtype: list of ProjectProperty
"""
for prop in getProjectProperties():
if (prop.description == name or prop.name == name) and \
((prop.semanticgroup and prop_type == 'semanticgroup') or
(prop.node and prop_type == 'node') or (prop_type == 'project'
and not (prop.node or prop.semanticgroup))):
return prop
return None
def toSoftware(columns):
return [x.strip() for x in columns[1:] if len(x) > 0]
def getMetDataLoader():
"""
:return:
@rtype: MetaDataLoader
"""
if 'metadataLoader' not in global_config:
global_config['metadataLoader'] = MetaDataLoader()
return global_config['metadataLoader']
def operationVersion():
return getMetDataLoader().version
def validateSoftware(softwareName, softwareVersion):
for software_type, typed_software_set in getMetDataLoader().software_set.iteritems():
if softwareName in typed_software_set and softwareVersion in typed_software_set[softwareName]:
return True
return False
class Software:
name = None
version = None
internal = False
def __init__(self, name, version, internal=False):
self.name = name
self.version = version
self.internal = internal
class SoftwareLoader:
software = {}
preference = None
loader = MaskGenLoader()
def __init__(self):
self.load()
def load(self):
res = {}
self.preference = self.loader.get_key('software_pref')
newset = self.loader.get_key('software')
if newset is not None:
if type(newset) == list:
for item in newset:
if validateSoftware(item[0], item[1]):
res[item[0]] = item[1]
else:
for name, version in newset.iteritems():
if validateSoftware(name, version):
res[name] = version
self.software = res
def get_preferred_version(self, name=None):
if self.preference is not None and (name is None or name == self.preference[0]):
return self.preference[1]
if len(self.software) > 0:
if name in self.software:
return self.software[name]
elif name is None:
return self.software[self.software.keys()[0]]
return None
def get_preferred_name(self):
if self.preference is not None:
return self.preference[0]
if len(self.software) > 0:
return self.software.keys()[0]
return None
def get_names(self, software_type):
if software_type is None:
return []
return list(getMetDataLoader().software_set[software_type].keys())
def get_versions(self, name, software_type=None, version=None):
types_to_check = getMetDataLoader().software_set.keys() if software_type is None else [software_type]
for type_to_check in types_to_check:
versions = getMetDataLoader().software_set[type_to_check][name] if name in getMetDataLoader().software_set[type_to_check] else None
if versions is None:
continue
if version is not None and strip_version(version) not in versions:
versions = list(versions)
versions.append(version)
logging.getLogger('maskgen').warning( version + ' not in approved set for software ' + name)
return versions
return []
def add(self, software):
isChanged = False
if validateSoftware(software.name, software.version):
if not software.name in self.software or self.software[software.name] != software.version:
self.software[software.name] = software.version
isChanged = True
pref = self.preference
if pref is None or pref[0] != software.name or pref[1] != software.version:
self.preference = [software.name, software.version]
isChanged = True
return isChanged
def save(self):
self.loader.saveall([("software", self.software), ("software_pref", self.preference)])
# (end of file; license: bsd-3-clause)

# ===========================================================================
# finfish/scrapy :: tests/test_utils_conf.py
# ===========================================================================
import unittest
from scrapy.settings import BaseSettings
from scrapy.utils.conf import build_component_list, arglist_to_dict
class BuildComponentListTest(unittest.TestCase):
def test_build_dict(self):
d = {'one': 1, 'two': None, 'three': 8, 'four': 4}
self.assertEqual(build_component_list(d, convert=lambda x: x),
['one', 'four', 'three'])
def test_backward_compatible_build_dict(self):
base = {'one': 1, 'two': 2, 'three': 3, 'five': 5, 'six': None}
custom = {'two': None, 'three': 8, 'four': 4}
self.assertEqual(build_component_list(base, custom,
convert=lambda x: x),
['one', 'four', 'five', 'three'])
def test_return_list(self):
custom = ['a', 'b', 'c']
self.assertEqual(build_component_list(None, custom,
convert=lambda x: x),
custom)
def test_map_dict(self):
custom = {'one': 1, 'two': 2, 'three': 3}
self.assertEqual(build_component_list({}, custom,
convert=lambda x: x.upper()),
['ONE', 'TWO', 'THREE'])
def test_map_list(self):
custom = ['a', 'b', 'c']
self.assertEqual(build_component_list(None, custom,
lambda x: x.upper()),
['A', 'B', 'C'])
def test_duplicate_components_in_dict(self):
duplicate_dict = {'one': 1, 'two': 2, 'ONE': 4}
self.assertRaises(ValueError, build_component_list, {}, duplicate_dict,
convert=lambda x: x.lower())
def test_duplicate_components_in_list(self):
duplicate_list = ['a', 'b', 'a']
self.assertRaises(ValueError, build_component_list, None,
duplicate_list, convert=lambda x: x)
def test_duplicate_components_in_basesettings(self):
# Higher priority takes precedence
duplicate_bs = BaseSettings({'one': 1, 'two': 2}, priority=0)
duplicate_bs.set('ONE', 4, priority=10)
self.assertEqual(build_component_list(duplicate_bs,
convert=lambda x: x.lower()),
['two', 'one'])
duplicate_bs.set('one', duplicate_bs['one'], priority=20)
self.assertEqual(build_component_list(duplicate_bs,
convert=lambda x: x.lower()),
['one', 'two'])
# Same priority raises ValueError
duplicate_bs.set('ONE', duplicate_bs['ONE'], priority=20)
self.assertRaises(ValueError, build_component_list, duplicate_bs,
convert=lambda x: x.lower())
def test_valid_numbers(self):
# work well with None and numeric values
d = {'a': 10, 'b': None, 'c': 15, 'd': 5.0}
self.assertEqual(build_component_list(d, convert=lambda x: x),
['d', 'a', 'c'])
d = {'a': 33333333333333333333, 'b': 11111111111111111111, 'c': 22222222222222222222}
self.assertEqual(build_component_list(d, convert=lambda x: x),
['b', 'c', 'a'])
# raise exception for invalid values
d = {'one': '5'}
self.assertRaises(ValueError, build_component_list, {}, d, convert=lambda x: x)
d = {'one': '1.0'}
self.assertRaises(ValueError, build_component_list, {}, d, convert=lambda x: x)
d = {'one': [1, 2, 3]}
self.assertRaises(ValueError, build_component_list, {}, d, convert=lambda x: x)
d = {'one': {'a': 'a', 'b': 2}}
self.assertRaises(ValueError, build_component_list, {}, d, convert=lambda x: x)
d = {'one': 'lorem ipsum',}
self.assertRaises(ValueError, build_component_list, {}, d, convert=lambda x: x)
class UtilsConfTestCase(unittest.TestCase):
def test_arglist_to_dict(self):
self.assertEqual(arglist_to_dict(['arg1=val1', 'arg2=val2']),
{'arg1': 'val1', 'arg2': 'val2'})
if __name__ == "__main__":
unittest.main()
# (end of file; license: bsd-3-clause)

# ===========================================================================
# htzy/bigfour :: common/djangoapps/cors_csrf/tests/test_views.py
# ===========================================================================
"""Tests for cross-domain request views. """
import json
from django.test import TestCase
from django.core.urlresolvers import reverse, NoReverseMatch
import ddt
from config_models.models import cache
from cors_csrf.models import XDomainProxyConfiguration
@ddt.ddt
class XDomainProxyTest(TestCase):
"""Tests for the xdomain proxy end-point. """
def setUp(self):
"""Clear model-based config cache. """
super(XDomainProxyTest, self).setUp()
try:
self.url = reverse('xdomain_proxy')
except NoReverseMatch:
self.skipTest('xdomain_proxy URL is not configured')
cache.clear()
def test_xdomain_proxy_disabled(self):
self._configure(False)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(None, [' '], [' ', ' '])
def test_xdomain_proxy_enabled_no_whitelist(self, whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(
(['example.com'], ['example.com']),
(['example.com', 'sub.example.com'], ['example.com', 'sub.example.com']),
([' example.com '], ['example.com']),
([' ', 'example.com'], ['example.com']),
)
@ddt.unpack
def test_xdomain_proxy_enabled_with_whitelist(self, whitelist, expected_whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self._check_whitelist(response, expected_whitelist)
def _configure(self, is_enabled, whitelist=None):
"""Enable or disable the end-point and configure the whitelist. """
config = XDomainProxyConfiguration.current()
config.enabled = is_enabled
if whitelist:
config.whitelist = "\n".join(whitelist)
config.save()
cache.clear()
def _load_page(self):
"""Load the end-point. """
return self.client.get(reverse('xdomain_proxy'))
def _check_whitelist(self, response, expected_whitelist):
"""Verify that the domain whitelist is rendered on the page. """
rendered_whitelist = json.dumps({
domain: '*'
for domain in expected_whitelist
})
self.assertContains(response, 'xdomain.min.js')
self.assertContains(response, rendered_whitelist)
# (end of file; license: agpl-3.0)

# ===========================================================================
# morph027/ansible-modules-extras :: cloud/cloudstack/cs_ip_address.py
# ===========================================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <[email protected]>
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_ip_address
short_description: Manages public IP address associations on Apache CloudStack based clouds.
description:
- Acquires and associates a public IP to an account or project. Due to API
limitations this is not an idempotent call, so be sure to only
conditionally call this when C(state=present)
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address.
- Required if C(state=absent)
required: false
default: null
domain:
description:
- Domain the IP address is related to.
required: false
default: null
network:
description:
- Network the IP address is related to.
required: false
default: null
vpc:
description:
- VPC the IP address is related to.
required: false
default: null
version_added: "2.2"
account:
description:
- Account the IP address is related to.
required: false
default: null
project:
description:
- Name of the project the IP address is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the IP address is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Associate an IP address conditonally
- local_action:
module: cs_ip_address
network: My Network
register: ip_address
when: instance.public_ip is undefined
# Disassociate an IP address
- local_action:
module: cs_ip_address
ip_address: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the Public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
zone:
description: Name of zone the IP address is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the IP address is related to.
returned: success
type: string
sample: Production
account:
description: Account the IP address is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the IP address is related to.
returned: success
type: string
sample: example domain
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackIPAddress(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackIPAddress, self).__init__(module)
self.returns = {
'ipaddress': 'ip_address',
}
#TODO: Add to parent class, duplicated in cs_network
def get_network(self, key=None, network=None):
if not network:
network = self.module.params.get('network')
if not network:
return None
args = {}
args['account'] = self.get_account('name')
args['domainid'] = self.get_domain('id')
args['projectid'] = self.get_project('id')
args['zoneid'] = self.get_zone('id')
networks = self.cs.listNetworks(**args)
if not networks:
self.module.fail_json(msg="No networks available")
for n in networks['network']:
if network in [ n['displaytext'], n['name'], n['id'] ]:
return self._get_by_key(key, n)
self.module.fail_json(msg="Network '%s' not found" % network)
#TODO: Merge changes here with parent class
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['vpcid'] = self.get_vpc(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if ip_addresses:
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def associate_ip_address(self):
self.result['changed'] = True
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['networkid'] = self.get_network(key='id')
args['zoneid'] = self.get_zone(key='id')
ip_address = {}
if not self.module.check_mode:
res = self.cs.associateIpAddress(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self.poll_job(res, 'ipaddress')
ip_address = res
return ip_address
def disassociate_ip_address(self):
ip_address = self.get_ip_address()
if ip_address is None:
return ip_address
if ip_address['isstaticnat']:
self.module.fail_json(msg="IP address is allocated via static nat")
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.disassociateIpAddress(id=ip_address['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'ipaddress')
return ip_address
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=False),
state = dict(choices=['present', 'absent'], default='present'),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_ip_address = AnsibleCloudStackIPAddress(module)
state = module.params.get('state')
if state in ['absent']:
ip_address = acs_ip_address.disassociate_ip_address()
else:
ip_address = acs_ip_address.associate_ip_address()
result = acs_ip_address.get_result(ip_address)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
# (end of file; license: gpl-3.0)

# ===========================================================================
# tdegrunt/or-tools :: examples/python/crypta.py
# ===========================================================================
# Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Cryptarithmetic puzzle in Google CP Solver.
Prolog benchmark problem GNU Prolog (crypta.pl)
'''
Name : crypta.pl
Title : crypt-arithmetic
Original Source: P. Van Hentenryck's book
Adapted by : Daniel Diaz - INRIA France
Date : September 1992
Solve the operation:
B A I J J A J I I A H F C F E B B J E A
+ D H F G A B C D I D B I F F A G F E J E
-----------------------------------------
= G J E G A C D D H F A F J B F I H E E F
'''
Compare with the following models:
* Comet: http://hakank.org/comet/crypta.co
* MiniZinc: http://hakank.org/minizinc/crypta.mzn
* ECLiPSe: http://hakank.org/eclipse/crypta.ecl
* Gecode: http://hakank.org/gecode/crypta.cpp
* SICStus: http://hakank.org/sicstus/crypta.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver("Crypta")
#
# data
#
#
# variables
#
LD = [solver.IntVar(0, 9, "LD[%i]" % i) for i in range(0, 10)]
A, B, C, D, E, F, G, H, I, J = LD
Sr1 = solver.IntVar(0, 1, "Sr1")
Sr2 = solver.IntVar(0, 1, "Sr2")
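  # Sr1 and Sr2 are the carry digits between the three column blocks of the sum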
#
# constraints
#
solver.Add(solver.AllDifferent(LD))
solver.Add(B >= 1)
solver.Add(D >= 1)
solver.Add(G >= 1)
solver.Add(A + 10 * E + 100 * J + 1000 * B + 10000 * B + 100000 * E + 1000000 * F +
E + 10 * J + 100 * E + 1000 * F + 10000 * G + 100000 * A + 1000000 * F
== F + 10 * E + 100 * E + 1000 * H + 10000 * I + 100000 * F + 1000000 * B + 10000000 * Sr1)
solver.Add(C + 10 * F + 100 * H + 1000 * A + 10000 * I + 100000 * I + 1000000 * J +
F + 10 * I + 100 * B + 1000 * D + 10000 * I + 100000 * D + 1000000 * C + Sr1
== J + 10 * F + 100 * A + 1000 * F + 10000 * H + 100000 * D + 1000000 * D + 10000000 * Sr2)
solver.Add(A + 10 * J + 100 * J + 1000 * I + 10000 * A + 100000 * B +
B + 10 * A + 100 * G + 1000 * F + 10000 * H + 100000 * D + Sr2
== C + 10 * A + 100 * G + 1000 * E + 10000 * J + 100000 * G)
#
# search and result
#
db = solver.Phase(LD,
solver.INT_VAR_SIMPLE,
solver.INT_VALUE_SIMPLE)
solver.NewSearch(db)
num_solutions = 0
  letters = "ABCDEFGHIJ"
while solver.NextSolution():
num_solutions += 1
    for (letter, val) in [(letters[i], LD[i].Value()) for i in range(len(LD))]:
print "%s: %i" % (letter, val)
print
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
if __name__ == "__main__":
main()
| apache-2.0 | -3,790,485,967,875,447,300 | 28.422414 | 104 | 0.58746 | false |
utkarsh-goswami/erpnext | erpnext/demo/user/accounts.py | 33 | 4171 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import random
from frappe.utils import random_string
from frappe.desk import query_report
from erpnext.accounts.doctype.journal_entry.journal_entry import get_payment_entry_against_invoice
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry
from frappe.utils.make_random import get_random
from erpnext.accounts.doctype.payment_request.payment_request import make_payment_request, make_payment_entry
from erpnext.demo.user.sales import make_sales_order
from erpnext.selling.doctype.sales_order.sales_order import make_sales_invoice
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_invoice
def work():
frappe.set_user(frappe.db.get_global('demo_accounts_user'))
if random.random() <= 0.6:
report = "Ordered Items to be Billed"
for so in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
si = frappe.get_doc(make_sales_invoice(so))
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.insert()
si.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() <= 0.6:
report = "Received Items to be Billed"
for pr in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 5)]:
try:
pi = frappe.get_doc(make_purchase_invoice(pr))
pi.posting_date = frappe.flags.current_date
pi.bill_no = random_string(6)
pi.insert()
pi.submit()
frappe.db.commit()
except frappe.ValidationError:
pass
if random.random() < 0.5:
make_payment_entries("Sales Invoice", "Accounts Receivable")
if random.random() < 0.5:
make_payment_entries("Purchase Invoice", "Accounts Payable")
if random.random() < 0.1:
#make payment request against sales invoice
sales_invoice_name = get_random("Sales Invoice", filters={"docstatus": 1})
if sales_invoice_name:
si = frappe.get_doc("Sales Invoice", sales_invoice_name)
if si.outstanding_amount > 0:
payment_request = make_payment_request(dt="Sales Invoice", dn=si.name, recipient_id=si.contact_email,
submit_doc=True, mute_email=True, use_dummy_message=True)
payment_entry = frappe.get_doc(make_payment_entry(payment_request.name))
payment_entry.posting_date = frappe.flags.current_date
payment_entry.submit()
make_pos_invoice()
def make_payment_entries(ref_doctype, report):
outstanding_invoices = list(set([r[3] for r in query_report.run(report,
{"report_date": frappe.flags.current_date })["result"] if r[2]==ref_doctype]))
# make Payment Entry
for inv in outstanding_invoices[:random.randint(1, 2)]:
pe = get_payment_entry(ref_doctype, inv)
pe.posting_date = frappe.flags.current_date
pe.reference_no = random_string(6)
pe.reference_date = frappe.flags.current_date
pe.insert()
pe.submit()
frappe.db.commit()
outstanding_invoices.remove(inv)
# make payment via JV
for inv in outstanding_invoices[:1]:
jv = frappe.get_doc(get_payment_entry_against_invoice(ref_doctype, inv))
jv.posting_date = frappe.flags.current_date
jv.cheque_no = random_string(6)
jv.cheque_date = frappe.flags.current_date
jv.insert()
jv.submit()
frappe.db.commit()
def make_pos_invoice():
make_sales_order()
for data in frappe.get_all('Sales Order', fields=["name"],
filters = [["per_billed", "<", "100"]]):
si = frappe.get_doc(make_sales_invoice(data.name))
        si.is_pos = 1
si.posting_date = frappe.flags.current_date
for d in si.get("items"):
if not d.income_account:
d.income_account = "Sales - {}".format(frappe.db.get_value('Company', si.company, 'abbr'))
si.set_missing_values()
make_payment_entries_for_pos_invoice(si)
si.insert()
si.submit()
def make_payment_entries_for_pos_invoice(si):
for data in si.payments:
data.amount = si.outstanding_amount
return
| gpl-3.0 | 4,403,884,196,325,438,500 | 34.347458 | 109 | 0.713018 | false |
tsaitsai/jasper-client | client/populate.py | 28 | 5053 | # -*- coding: utf-8-*-
import os
import re
from getpass import getpass
import yaml
from pytz import timezone
import feedparser
import jasperpath
def run():
profile = {}
print("Welcome to the profile populator. If, at any step, you'd prefer " +
"not to enter the requested information, just hit 'Enter' with a " +
"blank field to continue.")
def simple_request(var, cleanVar, cleanInput=None):
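        # prompt for a value; store it in the profile only if the user typed one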
input = raw_input(cleanVar + ": ")
if input:
if cleanInput:
input = cleanInput(input)
profile[var] = input
# name
simple_request('first_name', 'First name')
simple_request('last_name', 'Last name')
# gmail
print("\nJasper uses your Gmail to send notifications. Alternatively, " +
"you can skip this step (or just fill in the email address if you " +
"want to receive email notifications) and setup a Mailgun " +
"account, as at http://jasperproject.github.io/documentation/" +
"software/#mailgun.\n")
simple_request('gmail_address', 'Gmail address')
profile['gmail_password'] = getpass()
# phone number
def clean_number(s):
return re.sub(r'[^0-9]', '', s)
phone_number = clean_number(raw_input("\nPhone number (no country " +
"code). Any dashes or spaces will " +
"be removed for you: "))
profile['phone_number'] = phone_number
# carrier
print("\nPhone carrier (for sending text notifications).")
print("If you have a US phone number, you can enter one of the " +
"following: 'AT&T', 'Verizon', 'T-Mobile' (without the quotes). " +
"If your carrier isn't listed or you have an international " +
"number, go to http://www.emailtextmessages.com and enter the " +
"email suffix for your carrier (e.g., for Virgin Mobile, enter " +
"'vmobl.com'; for T-Mobile Germany, enter 't-d1-sms.de').")
carrier = raw_input('Carrier: ')
if carrier == 'AT&T':
profile['carrier'] = 'txt.att.net'
elif carrier == 'Verizon':
profile['carrier'] = 'vtext.com'
elif carrier == 'T-Mobile':
profile['carrier'] = 'tmomail.net'
else:
profile['carrier'] = carrier
# location
def verifyLocation(place):
feed = feedparser.parse('http://rss.wunderground.com/auto/rss_full/' +
place)
numEntries = len(feed['entries'])
if numEntries == 0:
return False
else:
print("Location saved as " + feed['feed']['description'][33:])
return True
print("\nLocation should be a 5-digit US zipcode (e.g., 08544). If you " +
"are outside the US, insert the name of your nearest big " +
"town/city. For weather requests.")
location = raw_input("Location: ")
while location and not verifyLocation(location):
print("Weather not found. Please try another location.")
location = raw_input("Location: ")
if location:
profile['location'] = location
# timezone
print("\nPlease enter a timezone from the list located in the TZ* " +
"column at http://en.wikipedia.org/wiki/" +
"List_of_tz_database_time_zones, or none at all.")
tz = raw_input("Timezone: ")
while tz:
try:
timezone(tz)
profile['timezone'] = tz
break
        except Exception:
print("Not a valid timezone. Try again.")
tz = raw_input("Timezone: ")
response = raw_input("\nWould you prefer to have notifications sent by " +
"email (E) or text message (T)? ")
while not response or (response != 'E' and response != 'T'):
response = raw_input("Please choose email (E) or text message (T): ")
profile['prefers_email'] = (response == 'E')
stt_engines = {
"sphinx": None,
"google": "GOOGLE_SPEECH"
}
response = raw_input("\nIf you would like to choose a specific STT " +
"engine, please specify which.\nAvailable " +
"implementations: %s. (Press Enter to default " +
"to PocketSphinx): " % stt_engines.keys())
if (response in stt_engines):
profile["stt_engine"] = response
api_key_name = stt_engines[response]
if api_key_name:
key = raw_input("\nPlease enter your API key: ")
profile["keys"] = {api_key_name: key}
else:
print("Unrecognized STT engine. Available implementations: %s"
% stt_engines.keys())
profile["stt_engine"] = "sphinx"
# write to profile
print("Writing to profile...")
if not os.path.exists(jasperpath.CONFIG_PATH):
os.makedirs(jasperpath.CONFIG_PATH)
outputFile = open(jasperpath.config("profile.yml"), "w")
yaml.dump(profile, outputFile, default_flow_style=False)
print("Done.")
if __name__ == "__main__":
run()
| mit | -6,358,675,409,753,429,000 | 36.42963 | 79 | 0.574312 | false |
Xowap/ansible | lib/ansible/playbook/handler.py | 237 | 1957 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
#from ansible.inventory.host import Host
from ansible.playbook.task import Task
class Handler(Task):
def __init__(self, block=None, role=None, task_include=None):
self._flagged_hosts = []
super(Handler, self).__init__(block=block, role=role, task_include=task_include)
def __repr__(self):
''' returns a human readable representation of the handler '''
return "HANDLER: %s" % self.get_name()
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = Handler(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
def flag_for_host(self, host):
        # assert isinstance(host, Host)
if host not in self._flagged_hosts:
self._flagged_hosts.append(host)
def has_triggered(self, host):
return host in self._flagged_hosts
def serialize(self):
result = super(Handler, self).serialize()
result['is_handler'] = True
return result
| gpl-3.0 | -6,567,281,502,828,106,000 | 35.924528 | 97 | 0.69954 | false |
adrifloresm/sssweep | sssweep/web_viewer_gen.py | 1 | 13431 | """
* Copyright (c) 2012-2017, Adriana Flores
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the name of prim nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
"""
def get_css():
css = """\
html, body, .viewport {
width: 100%;
height: 100%;
margin: 0;
padding: 0;
}
html *
{
font-family: Arial, Helvetica, sans-serif !important;
font-size: 14px;
}
img {
image-rendering: -moz-crisp-edges; /* Firefox */
image-rendering: -o-crisp-edges; /* Opera */
image-rendering: -webkit-optimize-contrast;/* Webkit */
image-rendering: crisp-edges;
-ms-interpolation-mode: nearest-neighbor; /* IE (non-standard property) */
}
.wrapper {
display: -webkit-box;
display: -moz-box;
display: -ms-flexbox;
display: -webkit-flex;
display: flex;
-webkit-flex-flow: row wrap;
flex-flow: row wrap;
text-align: center;
height: 100%;
margin: 0;
padding: 0;
}
/* We tell all items to be 100% width */
.wrapper > * {
padding: 10px;
flex: 1 100%;
}
h2 {font-size: 20px !important; text-align:center;}
.logo img { height:45px;}
.main {text-align: center;}
.aside-1 {
border-right: thin solid #C6C9CA;
background: #eee;
}
.plotImg {
height: auto;
width: auto;
max-height: 100%;
max-width: 100%;
}
/* Large format (side to side) */
@media all and (min-width: 1000px) {
.aside-1 { text-align:left;
-webkit-flex: 1 5%;
flex: 1 5%;
-webkit-order:1;
order:1;}
.main { order: 2; flex:6;}
}
/* small format - nav top plot bottom */
@media (max-width: 1000px) {
.wrapper { height: auto;}
.logo img {height:40px;}
.aside-1 {border: none; border-bottom: thin solid #C6C9CA;}
.plotImg {height: auto; width:auto;}
}"""
return css
def get_html_top(self, files):
html_top = ("""\
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="{0}">
<script src="{1}"></script>
</head>
<body>
<div class="wrapper">
<!-- ==================================================================- -->
<aside class="aside aside-1">
<!-- --------------------------------- -->
<div class="logo">
<a href=""><img src="https://www.labs.hpe.com/img/home/labs-logo.png"
alt="HPE Labs logo"/></a>
<h2>SuperSim Plot Viewer</h2>
</div>
<!-- --------------------------------- -->
<div id="mode">
Plot Type:
<select id="mode_sel" name="mode_select" onchange="showDiv(this)">
<option disabled selected value> -- select an option -- </option>
<option value="lplot">lplot</option>
<option value="qplot">qplot</option>
""".format(files['css_in'], files['javascript_in']))
if self._comp_var_count == 0:
html_top2 = """
</select>
</div>
<hr>
<!-- --------------------------------- -->
<div id="options">"""
else:
html_top2 = """\
<option value="cplot">cplot</option>
</select>
</div>
<hr>
<!-- --------------------------------- -->
<div id="options">"""
return html_top + html_top2
def get_html_bottom():
html_bottom = """\
<!-- --------------------------------- -->
<div>
<p>Filename:</p>
<p id="plot_name"></p>
</div>
</div>
</aside>
<!-- ==================================================================- -->
<article class="main">
<img class="plotImg" id="plot" src="" onError="noImgFile()" />
</article>
<!-- ==================================================================- -->
</div>
</body>
</html>"""
return html_bottom
def get_html_dyn(self, load_latency_stats):
html_dyn = ""
# ------------------------------------------- #
#(KEEP SPACES AS IS)
vars_selector = ""
cmp_selector = ""
# end of selector
select_end = ("""</select><br></p>
</div>
""")
# Comp Selector
cmp_option = ""
cmp_sel_top = ("""\
<div style ='display:none;' id="{0}">
<p>Compare Variable:
<select id="{0}_sel" onchange="CplotDivs(this)">
""".format(self._id_cmp))
# select an option
disable_select = ("<option disabled selected value> -- "
"select an option -- </option>")
# latency distribution selector
ld_option = ""
ld_top = ("""\
<div style ='display:none;' id="{0}">
<p>Latency Distribution:
<select id="{0}_sel" onchange="createName()">
<option disabled selected value> -- select an option -- </option>
""".format(self._id_lat_dist))
# ------------------------------------------- #
# dynamic generation of selects for html
for var in self._variables:
# only one option - pre select it
if len(var['values']) == 1:
# start of selector
select_start = ("""<div style ='display:none;' id="{1}">
<p>{0}:
<select id="{1}_sel" onchange="createName()">
""".format(var['name'], var['short_name'])) #no "select an option"
# options - iterate through values
select_option = ""
if var['values_dic'] is not None: # with dict name ()
for val in var['values_dic']:
select_option += ("""\
<option value="{0}" selected="true" disabled="disabled">{1} ({0})</option>
""".format(val, var['values_dic'][val]))
else: # no dict name
for val in var['values']:
select_option += ("""\
<option value="{0}" selected="true" disabled="disabled">{0}</option>
""".format(val))
# more than 1 value - multiple options
elif len(var['values']) > 1:
# start of selector with select an option
select_start = ("""<div style ='display:none;' id="{1}">
<p>{0}:
<select id="{1}_sel" onchange="createName()">
<option disabled selected value> -- select an option -- </option>
""".format(var['name'],
var['short_name']))
# options - iterate through values
select_option = ""
if var['values_dic'] is not None: # with dict name ()
for val in var['values_dic']:
select_option += (""" <option value="{0}">{1} ({0})</option>
""".format(val, var['values_dic'][val]))
else: # no dict name
for val in var['values']:
select_option += (""" <option value="{0}">{0}</option>
""".format(val))
selector = select_start + select_option + select_end
vars_selector += selector
# ------------------------------------------- #
# Compare Variables
for var in self._variables:
if var['compare'] and len(var['values']) > 1:
cmp_option += (""" <option value="{1}">{0} ({1})</option>
""".format(var['name'], var['short_name']))
if self._comp_var_count == 0: # no compare variable
cmp_option = ""
else: # multiple comp variables
cmp_option = disable_select + cmp_option
# ------------------------------------------- #
# loop through latency distributions
for field in load_latency_stats:
ld_option += (""" <option value="{0}">{0}</option>
""".format(field))
ld_selector = ld_top + ld_option + select_end
cmp_selector = cmp_sel_top + cmp_option + select_end
# all dynamic selectors
html_dyn = cmp_selector + vars_selector + ld_selector
return html_dyn
def get_show_div(self):
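  # builds the JS showDiv() handler: it toggles which selector <div>s are
  # visible for the plot mode chosen in the mode <select> (lplot/qplot/cplot)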
top = """function showDiv(elem){
"""
qplot_top = """\
if(elem.value == "qplot") {{
// no comp no loaddist
document.getElementById('{0}').style.display = "none";
document.getElementById('{1}').style.display = "none";
""".format(self._id_cmp, self._id_lat_dist)
lplot_top = """\
}} else if (elem.value == "lplot") {{
// no load no comp no loaddist
document.getElementById('{0}').style.display = "none";
document.getElementById('{1}').style.display = "none";
""".format(self._id_cmp, self._id_lat_dist)
cplot_top = """\
}} else if (elem.value == "cplot") {{
// only cmp selector
document.getElementById('{0}').style.display = "block";
document.getElementById('{0}').getElementsByTagName('option')[0].selected =
"selected";
document.getElementById('{1}').style.display = "none";
""".format(self._id_cmp, self._id_lat_dist)
bottom = """\
}
createName();
}
"""
#--------------------------------------------#
qplot_dyn = ""
lplot_dyn = ""
cplot_dyn = ""
id_one = ""
for var in self._variables:
# many options
if len(var['values']) > 1:
qplot_dyn += """\
document.getElementById('{0}').style.display = "block";
""".format(var['short_name'])
cplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
# lplot has no load selector
if var['name'] == self._load_name:
lplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
else:
lplot_dyn += """\
document.getElementById('{0}').style.display = "block";
""".format(var['short_name'])
# only one option do not display and color blue to add to filename
elif len(var['values']) == 1:
id_one += """\
document.getElementById('{0}').style.color = "blue";
""".format(var['short_name'])
qplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
cplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
# lplot has no load selector
if var['name'] == self._load_name:
lplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
else:
lplot_dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
return top + id_one + qplot_top + qplot_dyn + lplot_top + lplot_dyn + \
cplot_top + cplot_dyn + bottom
def get_cplot_divs(self):
top = """\
function CplotDivs(elem) {{
document.getElementById('{0}').style.display = "block";
""".format(self._id_lat_dist)
bottom = """\
//deactive cvar
document.getElementById(elem.value).style.display = "none";
createName();
}
"""
dyn = ""
for var in self._variables:
# no load selector
if var['name'] == self._load_name:
dyn += """\
document.getElementById('{0}').style.display = "none"
""".format(var['short_name'])
else:
if len(var['values']) > 1:
dyn += """\
document.getElementById('{0}').style.display = "block";
""".format(var['short_name'])
elif len(var['values']) == 1:
dyn += """\
document.getElementById('{0}').style.display = "none";
""".format(var['short_name'])
return top + dyn + bottom
def get_create_name():
create_name = """\
function noImgFile() {
document.getElementById("plot_name").style.color = "red";
document.getElementById('plot').src = '';
}
function createName() {
document.getElementById("plot_name").innerHTML = composeName();
document.getElementById("plot_name").style.color = "black";
document.getElementById('plot').src = '../plots/' + composeName();
}
"""
return create_name
def get_compose_name(self):
top = """\
function composeName() {
var m = document.getElementById("mode_sel").value;
"""
bottom = """\
// get displayed div values
var y = "";
for (var i = 0; i < vars_div_id.length; i++)
{
curr_elem = document.getElementById(vars_div_id[i]);
if (curr_elem.style.display == "block")
{
y += '_'
y += document.getElementById(vars_sel_id[i]).value;
} else if(curr_elem.style.color == "blue")
{
y += '_'
y += document.getElementById(vars_sel_id[i]).value;
}
}
return m + y + '.png'
}"""
# format variables for js
var_div_id = [] # list of div ids
var_sel_id = [] # list of selectors ids
# div ids
var_div_id.append(self._id_cmp)
for var in self._variables:
var_div_id.append(var['short_name'])
var_div_id.append(self._id_lat_dist)
# slector ids
for v_id in var_div_id:
sid = v_id + '_sel'
var_sel_id.append(sid)
dyn = """\
var vars_div_id = {0};
var vars_sel_id = {1};
""".format(var_div_id, var_sel_id)
return top + dyn + bottom
| bsd-3-clause | -5,049,409,654,571,146,000 | 28.261438 | 80 | 0.567791 | false |
DDelon/youtube-dl | youtube_dl/downloader/rtmp.py | 17 | 8354 | from __future__ import unicode_literals
import os
import re
import subprocess
import time
from .common import FileDownloader
from ..compat import compat_str
from ..utils import (
check_executable,
encodeFilename,
encodeArgument,
get_exe_version,
)
def rtmpdump_version():
return get_exe_version(
'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')
class RtmpFD(FileDownloader):
def real_download(self, filename, info_dict):
def run_rtmpdump(args):
start = time.time()
resume_percent = None
resume_downloaded_data_len = None
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
cursor_in_new_line = True
proc_stderr_closed = False
while not proc_stderr_closed:
# read line from stderr
line = ''
while True:
char = proc.stderr.read(1)
if not char:
proc_stderr_closed = True
break
if char in [b'\r', b'\n']:
break
line += char.decode('ascii', 'replace')
if not line:
# proc_stderr_closed is True
continue
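                # progress lines look like e.g. "123.456 kB / 12.34 sec (56.7%)"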
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec \(([0-9]{1,2}\.[0-9])%\)', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1)) * 1024)
percent = float(mobj.group(2))
if not resume_percent:
resume_percent = percent
resume_downloaded_data_len = downloaded_data_len
time_now = time.time()
eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
data_len = None
if percent > 0:
data_len = int(downloaded_data_len * 100 / percent)
self._hook_progress({
'status': 'downloading',
'downloaded_bytes': downloaded_data_len,
'total_bytes_estimate': data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'eta': eta,
'elapsed': time_now - start,
'speed': speed,
})
cursor_in_new_line = False
else:
# no percent for live streams
mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
if mobj:
downloaded_data_len = int(float(mobj.group(1)) * 1024)
time_now = time.time()
speed = self.calc_speed(start, time_now, downloaded_data_len)
self._hook_progress({
'downloaded_bytes': downloaded_data_len,
'tmpfilename': tmpfilename,
'filename': filename,
'status': 'downloading',
'elapsed': time_now - start,
'speed': speed,
})
cursor_in_new_line = False
elif self.params.get('verbose', False):
if not cursor_in_new_line:
self.to_screen('')
cursor_in_new_line = True
self.to_screen('[rtmpdump] ' + line)
proc.wait()
if not cursor_in_new_line:
self.to_screen('')
return proc.returncode
url = info_dict['url']
player_url = info_dict.get('player_url', None)
page_url = info_dict.get('page_url', None)
app = info_dict.get('app', None)
play_path = info_dict.get('play_path', None)
tc_url = info_dict.get('tc_url', None)
flash_version = info_dict.get('flash_version', None)
live = info_dict.get('rtmp_live', False)
conn = info_dict.get('rtmp_conn', None)
protocol = info_dict.get('rtmp_protocol', None)
real_time = info_dict.get('rtmp_real_time', False)
no_resume = info_dict.get('no_resume', False)
continue_dl = self.params.get('continuedl', True)
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
test = self.params.get('test', False)
# Check for rtmpdump first
if not check_executable('rtmpdump', ['-h']):
self.report_error('RTMP download detected but "rtmpdump" could not be run. Please install it.')
return False
# Download using rtmpdump. rtmpdump returns exit code 2 when
# the connection was interrupted and resuming appears to be
# possible. This is part of rtmpdump's normal usage, AFAIK.
basic_args = [
'rtmpdump', '--verbose', '-r', url,
'-o', tmpfilename]
if player_url is not None:
basic_args += ['--swfVfy', player_url]
if page_url is not None:
basic_args += ['--pageUrl', page_url]
if app is not None:
basic_args += ['--app', app]
if play_path is not None:
basic_args += ['--playpath', play_path]
if tc_url is not None:
basic_args += ['--tcUrl', tc_url]
if test:
basic_args += ['--stop', '1']
if flash_version is not None:
basic_args += ['--flashVer', flash_version]
if live:
basic_args += ['--live']
if isinstance(conn, list):
for entry in conn:
basic_args += ['--conn', entry]
elif isinstance(conn, compat_str):
basic_args += ['--conn', conn]
if protocol is not None:
basic_args += ['--protocol', protocol]
if real_time:
basic_args += ['--realtime']
args = basic_args
if not no_resume and continue_dl and not live:
args += ['--resume']
if not live and continue_dl:
args += ['--skip', '1']
args = [encodeArgument(a) for a in args]
self._debug_cmd(args, exe='rtmpdump')
RD_SUCCESS = 0
RD_FAILED = 1
RD_INCOMPLETE = 2
RD_NO_CONNECT = 3
retval = run_rtmpdump(args)
if retval == RD_NO_CONNECT:
self.report_error('[rtmpdump] Could not connect to RTMP server.')
return False
while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] %s bytes' % prevsize)
time.sleep(5.0) # This seems to be needed
args = basic_args + ['--resume']
if retval == RD_FAILED:
args += ['--skip', '1']
args = [encodeArgument(a) for a in args]
retval = run_rtmpdump(args)
cursize = os.path.getsize(encodeFilename(tmpfilename))
if prevsize == cursize and retval == RD_FAILED:
break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
retval = RD_SUCCESS
break
if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] %s bytes' % fsize)
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr('\n')
self.report_error('rtmpdump exited with code %d' % retval)
return False
| unlicense | -3,263,989,459,475,636,700 | 40.152709 | 121 | 0.494015 | false |
syedsuhail/customer_analyzer | lib/flask/signals.py | 783 | 2140 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# the namespace for code signals. If you are not flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# core signals. For usage examples grep the sourcecode or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
| apache-2.0 | 8,851,114,825,711,569,000 | 37.909091 | 71 | 0.662617 | false |
gam17/QAD | qad_break_fun.py | 1 | 4906 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin
 functions for the SPEZZA (BREAK) command, used to cut an object
-------------------
begin : 2019-08-08
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.core import *
from qgis.gui import *
from .qad_multi_geom import getQadGeomAt, isLinearQadGeom
from .qad_geom_relations import *
#===============================================================================
# breakQadGeometry
#===============================================================================
def breakQadGeometry(qadGeom, firstPt, secondPt):
"""
   the function breaks the geometry at one point (if <secondPt> = None) or at
   two points, the same way trim does.
   <qadGeom> = geometry to break
   <firstPt> = first break point
   <secondPt> = second break point
"""
if qadGeom is None: return None
gType = qadGeom.whatIs()
if gType == "POINT" or gType == "MULTI_POINT":
return None
   # the function returns a list with
   # (<minimum distance>
   # <closest point>
   # <index of the closest geometry>
   # <index of the closest sub-geometry>
   # <index of the closest part of the sub-geometry>
   # <"left of": whether the point is to the left of the part, with these values:
   #  - < 0 = left (for line, arc or ellipse arc) or inside (for circles, ellipses)
   #  - > 0 = right (for line, arc or ellipse arc) or outside (for circles, ellipses)
   # )
result = getQadGeomClosestPart(qadGeom, firstPt)
myFirstPt = result[1]
atGeom = result[2]
atSubGeom = result[3]
subQadGeom = getQadGeomAt(qadGeom, atGeom, atSubGeom).copy()
mySecondPt = None
if secondPt is not None:
      # the function returns a list with
      # (<minimum distance>
      # <closest point>
      # <index of the closest geometry>
      # <index of the closest sub-geometry>
      # <index of the closest part of the sub-geometry>
      # <"left of": whether the point is to the left of the part, with these values:
      #  - < 0 = left (for line, arc or ellipse arc) or inside (for circles, ellipses)
      #  - > 0 = right (for line, arc or ellipse arc) or outside (for circles, ellipses)
      # )
result = getQadGeomClosestPart(qadGeom, secondPt)
mySecondPt = result[1]
      # if the second point falls on a different sub-geometry than the first
      if result[2] != atGeom or result[3] != atSubGeom: return None
if mySecondPt is None or qad_utils.ptNear(myFirstPt, mySecondPt):
      # split the polyline in two
if isLinearQadGeom(subQadGeom) == False: return None
dummy = subQadGeom.breakOnPts(myFirstPt, None)
if dummy is None: return None
return [dummy[0], dummy[1], atGeom, atSubGeom]
   else: # there is also a second break point
gType = subQadGeom.whatIs()
if gType == "CIRCLE":
endAngle = qad_utils.getAngleBy2Pts(subQadGeom.center, myFirstPt)
startAngle = qad_utils.getAngleBy2Pts(subQadGeom.center, mySecondPt)
arc = QadArc().set(subQadGeom.center, subQadGeom.radius, startAngle, endAngle)
return [arc, None, atGeom, atSubGeom]
elif gType == "ELLIPSE":
endAngle = qad_utils.getAngleBy3Pts(subQadGeom.majorAxisFinalPt, subQadGeom.center, myFirstPt, False)
startAngle = qad_utils.getAngleBy3Pts(subQadGeom.majorAxisFinalPt, subQadGeom.center, mySecondPt, False)
ellipseArc = QadEllipseArc().set(subQadGeom.center, subQadGeom.majorAxisFinalPt, subQadGeom.axisRatio, startAngle, endAngle)
return [ellipseArc, None, atGeom, atSubGeom]
else:
dummy = subQadGeom.breakOnPts(myFirstPt, mySecondPt)
return [dummy[0], dummy[1], atGeom, atSubGeom]
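# Minimal usage sketch (inputs are QAD geometry/point objects; names assumed):
#   result = breakQadGeometry(qadGeom, firstPt, secondPt)
#   # result is [firstPiece, secondPiece or None, atGeom, atSubGeom],
#   # or None if the geometry cannot be broken at the given point(s)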
| gpl-3.0 | 2,953,694,900,859,195,400 | 41.705357 | 133 | 0.549132 | false |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/django/shortcuts.py | 78 | 5688 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.template import loader, RequestContext
from django.http import HttpResponse, Http404
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.core import urlresolvers
from django.utils import six
def render_to_response(*args, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
httpresponse_kwargs = {'content_type': kwargs.pop('content_type', None)}
return HttpResponse(loader.render_to_string(*args, **kwargs), **httpresponse_kwargs)
def render(request, *args, **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
"""
httpresponse_kwargs = {
'content_type': kwargs.pop('content_type', None),
'status': kwargs.pop('status', None),
}
if 'context_instance' in kwargs:
context_instance = kwargs.pop('context_instance')
if kwargs.get('current_app', None):
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
current_app = kwargs.pop('current_app', None)
context_instance = RequestContext(request, current_app=current_app)
kwargs['context_instance'] = context_instance
return HttpResponse(loader.render_to_string(*args, **kwargs),
**httpresponse_kwargs)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
Uses get() to return an object, or raises a Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: Like with get(), a MultipleObjectsReturned will be raised if more than one
object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
Uses filter() to return a list of objects, or raise a Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, six.string_types):
# Handle relative URLs
if any(to.startswith(path) for path in ('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
| mit | -832,662,720,353,733,200 | 32.857143 | 90 | 0.658228 | false |
JPMoresmau/aifh | vol2/vol2-python-examples/lib/aifh/aifh_error.py | 6 | 1218 | """
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
class AIFHError(Exception):
"""An error was raised. This is used for several purposes, see individual error messages."""
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)
| apache-2.0 | 9,087,343,678,150,114,000 | 30.25641 | 96 | 0.706076 | false |
eenchev/idea-note-taking-app | env/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/gaerdbms.py | 33 | 3387 | # mysql/gaerdbms.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+gaerdbms
:name: Google Cloud SQL
:dbapi: rdbms
:connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
:url: https://developers.google.com/appengine/docs/python/cloud-sql/\
developers-guide
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
minimal changes.
.. versionadded:: 0.7.8
.. deprecated:: 1.0 This dialect is **no longer necessary** for
Google Cloud SQL; the MySQLdb dialect can be used directly.
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
return os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
warn_deprecated(
"Google Cloud SQL now recommends creating connections via the "
"MySQLdb dialect directly, using the URL format "
"mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
"<projectid>:<instancename>"
)
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
# python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
if _is_dev_environment():
from google.appengine.api import rdbms_mysqldb
return rdbms_mysqldb
elif apiproxy_stub_map.apiproxy.GetStub('rdbms'):
from google.storage.speckle.python.api import rdbms_apiproxy
return rdbms_apiproxy
else:
from google.storage.speckle.python.api import rdbms_googleapi
return rdbms_googleapi
@classmethod
def get_pool_class(cls, url):
# Cloud SQL connections die at any moment
return NullPool
def create_connect_args(self, url):
opts = url.translate_connect_args()
if not _is_dev_environment():
# 'dsn' and 'instance' are because we are skipping
# the traditional google.api.rdbms wrapper
opts['dsn'] = ''
opts['instance'] = url.query['instance']
return [], opts
def _extract_error_code(self, exception):
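        # the regex below matches messages like "1045: ..." or "(1045L, '...')"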
match = re.compile(r"^(\d+)L?:|^\((\d+)L?,").match(str(exception))
# The rdbms api will wrap then re-raise some types of errors
# making this regex return no matches.
code = match.group(1) or match.group(2) if match else None
if code:
return int(code)
dialect = MySQLDialect_gaerdbms
| mit | 7,314,556,820,720,296,000 | 32.205882 | 91 | 0.658105 | false |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/contrib/distributions/python/ops/distribution.py | 7 | 33841 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
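# e.g. _update_docstring("Foo.\n\nArgs:\n  x: input", "Extra notes.") splices
# an indented "Extra notes." paragraph in just above the "Args:" section.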
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
        the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
base = baseclasses[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `a` and `b`, and does not have well-defined mode if
`a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If `True`, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
parameters: Python dictionary of parameters used to instantiate this
`Distribution`.
graph_parents: Python list of graph prerequisites of this `Distribution`.
name: A name for this distribution. Default: subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
parameters = parameters or {}
self._dtype = dtype
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
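    A sketch of typical usage (assuming a `Normal`-like subclass whose
    parameters are named `mu` and `sigma`; parameter names vary by subclass):

    ```python
    shapes = Normal.param_shapes([100])
    # `shapes["mu"]` and `shapes["sigma"]` are `Tensor`s evaluating to [100]:
    # parameters built with these shapes give `sample()` draws of shape [100].
    ```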
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
return self._parameters
@property
def is_continuous(self):
return self._is_continuous
@property
def is_reparameterized(self):
return self._is_reparameterized
@property
def allow_nan_stats(self):
"""Python boolean describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance
of a Cauchy distribution is infinity. However, sometimes the
statistic is undefined, e.g., if a distribution's pdf does not achieve a
maximum within the support of the distribution, the mode is undefined.
If the mean is undefined, then by definition the variance is undefined.
E.g., the mean of Student's t with df = 1 is undefined (no clear way to say
it is either + or - infinity), so the variance = E[(X - mean)^2] is also
undefined.
Returns:
allow_nan_stats: Python boolean.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python boolean indicated possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
    Note: the copied distribution may continue to depend on the original
    initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
      distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
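    A sketch (`Normal` and its parameter names are illustrative assumptions,
    not part of this base class):

    ```python
    n = Normal(mu=0.0, sigma=1.0)
    n_wide = n.copy(sigma=2.0)  # same `mu`, overridden `sigma`
    ```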
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
# Python3 leaks "__class__" into `locals()` so we remove if present.
# TODO(b/32376812): Remove this pop.
parameters.pop("__class__", None)
return type(self)(**parameters)
def _batch_shape(self):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
return self._batch_shape()
def _get_batch_shape(self):
return tensor_shape.TensorShape(None)
def get_batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._get_batch_shape()
def _event_shape(self):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
return self._event_shape()
def _get_event_shape(self):
return tensor_shape.TensorShape(None)
def get_event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._get_event_shape()
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
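        For example, if `batch_shape=(3,)` and `event_shape=(2,)`, then
        `sample(sample_shape=[4, 5])` returns a `Tensor` of shape
        `(4, 5, 3, 2)`, that is, `sample_shape + batch_shape + event_shape`.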
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
if sample_shape.get_shape().ndims == 0:
return self.sample_n(sample_shape, seed, **condition_kwargs)
sample_shape, total = self._expand_sample_shape(sample_shape)
samples = self.sample_n(total, seed, **condition_kwargs)
output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
array_ops.shape(samples), [1], [-1])])
output = array_ops.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
sample_shape).concatenate(samples.get_shape()[1:]))
return output
def sample_n(self, n, seed=None, name="sample_n", **condition_kwargs):
"""Generate `n` samples.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
"""
warnings.warn("Please use `sample` instead of `sample_n`. `sample_n` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
with self._name_scope(name, values=[n]):
n = ops.convert_to_tensor(n, name="n")
if not n.dtype.is_integer:
raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
x = self._sample_n(n, seed, **condition_kwargs)
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(n))
batch_ndims = self.get_batch_shape().ndims
event_ndims = self.get_event_shape().ndims
if batch_ndims is not None and event_ndims is not None:
inferred_shape = sample_shape.concatenate(
self.get_batch_shape().concatenate(
self.get_event_shape()))
x.set_shape(inferred_shape)
elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
x.get_shape()[0].merge_with(sample_shape[0])
if batch_ndims is not None and batch_ndims > 0:
x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
if event_ndims is not None and event_ndims > 0:
x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
return x
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob", **condition_kwargs):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf", **condition_kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(1. - self.cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function",
**condition_kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **condition_kwargs)
except NotImplementedError:
raise original_exception
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance."""
with self._name_scope(name):
return self._variance()
def _std(self):
raise NotImplementedError("std is not implemented")
def std(self, name="std"):
"""Standard deviation."""
with self._name_scope(name):
return self._std()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def log_pdf(self, value, name="log_pdf", **condition_kwargs):
"""Log probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pdf`. `log_pdf` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("log_pdf is undefined for non-continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pdf(self, value, name="pdf", **condition_kwargs):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pdf`. `pdf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
    return self.prob(value, name=name, **condition_kwargs)
def log_pmf(self, value, name="log_pmf", **condition_kwargs):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pmf`. `log_pmf` will "
"be deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pmf(self, value, name="pmf", **condition_kwargs):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pmf`. `pmf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name, **condition_kwargs)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape(self, sample_shape):
"""Helper to `sample` which ensures sample_shape is 1D."""
sample_shape_static_val = tensor_util.constant_value(sample_shape)
ndims = sample_shape.get_shape().ndims
if sample_shape_static_val is None:
if ndims is None or not sample_shape.get_shape().is_fully_defined():
ndims = array_ops.rank(sample_shape)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
array_ops.shape(sample_shape))
sample_shape = array_ops.reshape(sample_shape, expanded_shape)
total = math_ops.reduce_prod(sample_shape) # reduce_prod([]) == 1
else:
if ndims is None:
raise ValueError(
"Shouldn't be here; ndims cannot be none when we have a "
"tf.constant shape.")
if ndims == 0:
sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
sample_shape = ops.convert_to_tensor(
sample_shape_static_val,
dtype=dtypes.int32,
name="sample_shape")
total = np.prod(sample_shape_static_val,
dtype=dtypes.int32.as_numpy_dtype())
return sample_shape, total
| gpl-3.0 | 1,255,708,913,969,260,300 | 34.962806 | 80 | 0.655093 | false |
d33tah/npyscreen | tokentextbox.py | 9 | 1333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# filename: npsapp.py
import cProfile
import pstats
from npyscreen import NPSApp
from npyscreen import Form
from npyscreen import TextTokens, TitleTextTokens
class TextBoxForm(Form):
def create(self):
tb = self.add(TextTokens, name="TokenField", )#max_width=25)
tb.value = [
"Token 1 Testing",
"Token 2 ééé",
"Token 3 ",
"Token 4 ",
"Token 6 ",
"Token 7 ",
"Token 8 ",
"Token 9 ",
"Token 10 ",
"Token 11 ",
"Token 12 ",
"Token 6b ",
"Token 7b ",
"Token 8b ",
"Token 9b ",
"Token 10b ",
"Token 11b ",
"Token 12b ",
]
#tb.begin_at += 0
#tb.important=True
#tb.show_bold=True
self.highlight=True
tb.cursor_position=3
tb.left_margin=8
class App(NPSApp):
def main(self):
form = TextBoxForm(name='Welcome to Npyscreen')
form.edit()
if __name__ == '__main__':
app = App()
p = cProfile.run('app.run()', sort=1)
| bsd-2-clause | -83,342,801,809,311,300 | 24.576923 | 68 | 0.427068 | false |
emedvedev/st2 | st2common/tests/unit/test_util_api.py | 7 | 3292 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2common.constants.api import DEFAULT_API_VERSION
from st2common.util.api import get_base_public_api_url
from st2common.util.api import get_full_public_api_url
from st2common.util.api import get_mistral_api_url
from st2tests.config import parse_args
parse_args()
class APIUtilsTestCase(unittest2.TestCase):
def test_get_base_public_api_url(self):
values = [
'http://foo.bar.com',
'http://foo.bar.com/',
'http://foo.bar.com:8080',
'http://foo.bar.com:8080/',
'http://localhost:8080/',
]
expected = [
'http://foo.bar.com',
'http://foo.bar.com',
'http://foo.bar.com:8080',
'http://foo.bar.com:8080',
'http://localhost:8080',
]
for mock_value, expected_result in zip(values, expected):
cfg.CONF.auth.api_url = mock_value
actual = get_base_public_api_url()
self.assertEqual(actual, expected_result)
def test_get_full_public_api_url(self):
values = [
'http://foo.bar.com',
'http://foo.bar.com/',
'http://foo.bar.com:8080',
'http://foo.bar.com:8080/',
'http://localhost:8080/',
]
expected = [
'http://foo.bar.com/' + DEFAULT_API_VERSION,
'http://foo.bar.com/' + DEFAULT_API_VERSION,
'http://foo.bar.com:8080/' + DEFAULT_API_VERSION,
'http://foo.bar.com:8080/' + DEFAULT_API_VERSION,
'http://localhost:8080/' + DEFAULT_API_VERSION,
]
for mock_value, expected_result in zip(values, expected):
cfg.CONF.auth.api_url = mock_value
actual = get_full_public_api_url()
self.assertEqual(actual, expected_result)
def test_get_mistral_api_url(self):
cfg.CONF.set_override(name='api_url', override='http://127.0.0.1:9999', group='auth')
cfg.CONF.set_override(name='api_url', override=None, group='mistral')
# No URL set, should fall back to auth.api_url
result = get_mistral_api_url()
self.assertEqual(result, 'http://127.0.0.1:9999/' + DEFAULT_API_VERSION)
# mistral.api_url provided, should use that
cfg.CONF.set_override(name='api_url', override='http://10.0.0.0:9999', group='mistral')
result = get_mistral_api_url()
self.assertEqual(result, 'http://10.0.0.0:9999/' + DEFAULT_API_VERSION)
| apache-2.0 | 2,895,635,442,480,118,000 | 38.662651 | 95 | 0.625152 | false |
SqueezeStudioAnimation/omtk | python/omtk/vendor/pyparsing.py | 22 | 157475 | # module pyparsing.py
#
# Copyright (c) 2003-2013 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString( hello ))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.0.2"
__versionTime__ = "13 April 2014 12:10"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 'Upcase',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'keepOriginalText', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr',
]
PY_3 = sys.version.startswith('3')
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
       then returns the unicode object as-is.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_lowercase + string.ascii_uppercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
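        For example, if parsing fails at the first character of the line
        C{"abc"}, this returns the marked sketch C{">!<abc"}.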
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "loc msg pstr parserElement lineno col line " \
"markInputline __str__ __repr__".split()
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like C{L{ParseFatalException}}, but thrown internally when an
C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
an unbacktrackable syntax error has been found"""
def __init__(self, pe):
super(ParseSyntaxException, self).__init__(
pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by C{validate()} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
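       A sketch of all three access styles::
           result = (Word(alphas)("first") + Word(alphas)("second")).parseString("hello world")
           result[0]         # -> 'hello' (list index)
           result.first      # -> 'hello' (attribute)
           result["second"]  # -> 'world' (dict-style key)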
"""
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not toklist in (None,'',[]):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def iterkeys( self ):
"""Returns all named result keys."""
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def itervalues( self ):
"""Returns all named result values."""
return (self[k] for k in self.iterkeys())
def iteritems( self ):
return ((k, self[k]) for k in self.iterkeys())
if PY_3:
keys = iterkeys
values = itervalues
items = iteritems
else:
def keys( self ):
"""Returns all named result keys."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values."""
return list(self.itervalues())
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""Removes and returns item at specified index (default=last).
Supports both list and dict semantics for pop(). If passed no
argument or an integer argument, it will use list semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use dict
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in dict.pop()."""
if not args:
args = [-1]
        if 'default' in kwargs:
            # `args` may be a tuple, so build a new list rather than appending
            args = list(args) + [kwargs['default']]
        if (isinstance(args[0], int) or
                        len(args) == 1 or
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""Inserts new element at location index in the list of parsed tokens."""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict:
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""Add single element to end of ParseResults list of elements."""
self.__toklist.append(item)
def extend( self, itemseq ):
"""Add sequence of elements to end of ParseResults list of elements."""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""Clear all elements and results names."""
del self.__toklist[:]
self.__tokdict.clear()
    def __getattr__( self, name ):
        # delegate to __getitem__; undefined results names resolve to ""
        try:
            return self[name]
        except KeyError:
            return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
return self.copy()
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = []
for i in self.__toklist:
if isinstance(i, ParseResults):
out.append(_ustr(i))
else:
out.append(repr(i))
return '[' + ', '.join(out) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
if PY_3:
return dict( self.items() )
else:
return dict( self.iteritems() )
def copy( self ):
"""Returns a new copy of a C{ParseResults} object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
            return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
items = sorted(self.items())
for k,v in items:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.haskeys():
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
return "".join(out)
def pprint(self, *args, **kwargs):
"""Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __dir__(self):
return dir(super(ParseResults,self)) + list(self.keys())
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
# decorator to trim function calls to match the arity of the target
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
if limit[0] <= maxargs and not foundArity[0]:
limit[0] += 1
continue
raise
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
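        For example, to have plain string literals in expressions wrapped as
        C{Suppress} elements (a sketch; C{integer} stands for any previously
        defined expression)::
            ParserElement.inlineLiteralsUsing(Suppress)
            date = integer("year") + "/" + integer("month") + "/" + integer("day")
            # the "/" literals become Suppress elements and drop out of the results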
"""
ParserElement.literalStringClass = cls
inlineLiteralsUsing = staticmethod(inlineLiteralsUsing)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
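        For example, these two expressions are equivalent::
            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")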
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
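        A small illustrative sketch, converting matched numerals to ints::
            integer = Word(nums).setParseAction(lambda t: int(t[0]))
            integer.parseString("42")   # -> [42]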
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail acton fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value, Exception):
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException as pe:
pe.__traceback__ = None
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
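# Hedged usage sketch for enablePackrat (illustration only; assumes this
# module's public names are importable as ``pyparsing``):
#
#   import pyparsing
#   pyparsing.ParserElement.enablePackrat()   # call once, right after import
#   integer = pyparsing.Word(pyparsing.nums)
#   integer.parseString("42")                 # parses as usual, with memoizing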
def parseString( self, instring, parseAll=False ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- defining your parse action using the full C{(s,loc,toks)} signature, and
referencing the input string using the parse action's C{s} argument
- explicitly expanding the tabs in your input string before calling
C{parseString}
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
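# Hedged usage sketch for parseString (illustration only, not from this
# module's test suite):
#
#   from pyparsing import Word, nums
#   date = Word(nums) + "/" + Word(nums) + "/" + Word(nums)
#   print(date.parseString("12/31/1999"))
#   # -> ['12', '/', '31', '/', '1999']
#   # with parseAll=True, trailing unparsed text raises a ParseException:
#   date.parseString("12/31/1999 tail", parseAll=True)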
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextloc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
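# Hedged usage sketch for scanString (illustration only):
#
#   from pyparsing import Word, nums
#   integer = Word(nums)
#   for tokens, start, end in integer.scanString("abc 123 def 4567"):
#       print(tokens, start, end)
#   # -> ['123'] 4 7, then ['4567'] 12 16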
def transformString( self, instring ):
"""Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
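# Hedged usage sketch for transformString (illustration only; setParseAction
# is defined earlier in this module):
#
#   from pyparsing import Word, alphas
#   wd = Word(alphas)
#   wd.setParseAction(lambda toks: toks[0].upper())
#   print(wd.transformString("now is the winter"))
#   # -> "NOW IS THE WINTER"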
def searchString( self, instring, maxMatches=_MAX_INT ):
"""Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
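# Hedged usage sketch for searchString (illustration only):
#
#   from pyparsing import Word, nums
#   print(Word(nums).searchString("Room 101, floor 7"))
#   # -> [['101'], ['7']]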
def __add__(self, other ):
"""Implementation of + operator - returns C{L{And}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""Implementation of - operator, returns C{L{And}} with error stop"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""Implementation of - operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
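# Hedged usage sketch for the * operator (illustration only):
#
#   from pyparsing import Word, nums
#   octet = Word(nums, max=3)
#   ip_address = octet + ("." + octet) * 3    # exactly four dotted octets
#   print(ip_address.parseString("127.0.0.1"))
#   # octet * (1,3) would instead match one to three consecutive octets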
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns C{L{MatchFirst}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns C{L{Or}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns C{L{Each}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns C{L{NotAny}}"""
return NotAny( self )
def __call__(self, name=None):
"""Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other.copy() )
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "r")
file_contents = f.read()
f.close()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or self.__dict__ == other.__dict__
elif isinstance(other, basestring):
try:
self.parseString(_ustr(other), parseAll=True)
return True
except ParseBaseException:
return False
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
class Token(ParserElement):
"""Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}::
Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
matching, default is C{False}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
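# Hedged usage sketch contrasting Literal and Keyword (illustration only):
#
#   from pyparsing import Literal, Keyword
#   print(Literal("if").searchString("ifdef x endif"))  # matches inside 'ifdef'/'endif'
#   print(Keyword("if").searchString("ifdef x endif"))  # -> [] (no whole-word 'if')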
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
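# Hedged usage sketch for Word (illustration only):
#
#   from pyparsing import Word, alphas, alphanums
#   identifier = Word(alphas + "_", alphanums + "_")
#   print(identifier.parseString("user_name2"))
#   # -> ['user_name2']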
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with a string specifying the regular expression, in a form recognized by the built-in Python C{re} module.
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
charset = ''.join(set(self.quoteChar[0]+self.endQuoteChar[0])).replace('^',r'\^').replace('-',r'\-')
self.escCharReplacePattern = re.escape(self.escChar)+("([%s])" % charset)
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
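# Hedged usage sketch for QuotedString (illustration only):
#
#   from pyparsing import QuotedString
#   qs = QuotedString('"', escChar="\\")
#   print(qs.parseString(r'"a \"quoted\" word"'))
#   # -> ['a "quoted" word']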
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected start of line"
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ Literal( exprs ) ]
elif isinstance( exprs, collections.Sequence ):
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(Literal, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( exprs[0].whiteChars )
self.skipWhitespace = exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException(pe)
except IndexError:
raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
maxException = None
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
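# Hedged usage sketch contrasting MatchFirst ('|') and Or ('^')
# (illustration only):
#
#   from pyparsing import Word, nums
#   number = Word(nums)
#   first   = number | (number + "." + number)  # '|' stops at the first match
#   longest = number ^ (number + "." + number)  # '^' picks the longest match
#   print(first.parseString("3.14"))    # -> ['3']
#   print(longest.parseString("3.14"))  # -> ['3', '.', '14']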
class Each(ParseExpression):
"""Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
# on the first parse, partition the contained expressions into required,
# optional, and repeatable groups (computed once, then cached)
if self.initExprGroups:
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and e not in opt1 ]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults:
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
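# Hedged usage sketch for Each ('&'), matching parts in any order
# (illustration only):
#
#   from pyparsing import Keyword, Word, nums
#   color = Keyword("red") | Keyword("blue")
#   size = Word(nums)
#   spec = color & size
#   print(spec.parseString("red 7"))   # -> ['red', '7']
#   print(spec.parseString("7 blue"))  # -> ['7', 'blue']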
class ParseElementEnhance(ParserElement):
"""Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. C{FollowedBy}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. C{NotAny}
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, C{NotAny} does *not* skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If C{include} is set to true, the matched expression is also parsed (the skipped text
and matched expression are returned as a 2-element list). The C{ignore}
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if failOn is not None and isinstance(failOn, basestring):
self.failOn = Literal(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
failParse = False
while loc <= instrlen:
try:
if self.failOn:
try:
self.failOn.tryParse(instring, loc)
except ParseBaseException:
pass
else:
failParse = True
raise ParseException(instring, loc, "Found expression " + str(self.failOn))
failParse = False
if self.ignoreExpr is not None:
while 1:
try:
loc = self.ignoreExpr.tryParse(instring,loc)
# print("found ignoreExpr, advance to", loc)
except ParseBaseException:
break
expr._parse( instring, loc, doActions=False, callPreParse=False )
skipText = instring[startLoc:loc]
if self.includeMatch:
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ skipText ]
except (ParseException,IndexError):
if failParse:
raise
else:
loc += 1
raise ParseException(instring, loc, self.errmsg, self)
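# Hedged usage sketch for SkipTo (illustration only):
#
#   from pyparsing import SkipTo
#   upToSemi = SkipTo(";", include=True)
#   print(upToSemi.parseString("ignore all of this; then stop"))
#   # -> [['ignore all of this', ';']]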
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement.literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
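# A minimal Forward sketch (illustrative) for a recursive grammar of nested
# parenthesized integers; note the grouped '<<=' assignment as advised above:
#   >>> value = Forward()
#   >>> value <<= (Word(nums) | Group(Suppress('(') + value + Suppress(')')))
#   >>> value.parseString("((42))").asList()
#   [[['42']]]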
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of C{ParseExpression}, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return list(map( str.upper, tokenlist ))
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
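# A minimal Combine sketch (illustrative): adjacent tokens are glued back
# into a single string token.
#   >>> real = Combine(Word(nums) + '.' + Word(nums))
#   >>> real.parseString("3.1416").asList()
#   ['3.1416']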
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
                if len(dictvalue) != 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
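# A minimal Group/Dict sketch (illustrative): each grouped [key, value] pair
# becomes a named entry in the results.
#   >>> pair = Group(Word(alphas) + Suppress('=') + Word(nums))
#   >>> Dict(OneOrMore(pair)).parseString("a=1 b=2")['b']
#   '2'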
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
       By default, the list elements and delimiters can have intervening whitespace and
       comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
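# A minimal delimitedList sketch (illustrative):
#   >>> delimitedList(Word(alphas)).parseString("aa, bb ,cc").asList()
#   ['aa', 'bb', 'cc']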
def countedArray( expr, intExpr=None ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
       The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr )
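# A minimal countedArray sketch (illustrative): the leading count is consumed
# and suppressed, and exactly that many items are grouped.
#   >>> countedArray(Word(alphas)).parseString("3 ab cd ef").asList()
#   [['ab', 'cd', 'ef']]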
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
       previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
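# A minimal matchPreviousLiteral sketch (illustrative), mirroring the
# docstring example above:
#   >>> first = Word(nums)
#   >>> (first + ":" + matchPreviousLiteral(first)).parseString("12:12").asList()
#   ['12', ':', '12']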
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
       expressions, it will *not* match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Sequence):
symbols = list(strs[:])
elif isinstance(strs, _generatorType):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or list",
SyntaxWarning, stacklevel=2)
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) )
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) )
        except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
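# A minimal oneOf sketch (illustrative): longest-first testing means '<='
# wins over '<' even though '<' is listed first.
#   >>> oneOf("< = > <= >= !=").parseString("<=").asList()
#   ['<=']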
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
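# A minimal dictOf sketch (illustrative); the ':' is suppressed as part of
# the key pattern.
#   >>> entry = dictOf(Word(alphas) + Suppress(':'), Word(nums))
#   >>> entry.parseString("x: 1 y: 2")['y']
#   '2'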
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
input text. Simpler to use than the parse action C{L{keepOriginalText}}, and does not
require the inspect module to chase up the call stack. By default, returns a
string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values."""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
del t[:]
t.insert(0, s[t._original_start:t._original_end])
del t["_original_start"]
del t["_original_end"]
matchExpr.setParseAction(extractText)
return matchExpr
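# A minimal originalTextFor sketch (illustrative): the original slice of the
# input, including intervening whitespace, comes back as one token.
#   >>> originalTextFor(Word(alphas) + Word(alphas)).parseString("the   quick fox")[0]
#   'the   quick'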
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty."""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
       Be careful if the input text contains C{<TAB>} characters; you may want to call
C{L{ParserElement.parseWithTabs}}
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\x' (\x21, which is a '!' character)
(\0x## is also supported for backwards compatibility)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""DEPRECATED - use new helper method C{L{originalTextFor}}.
Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException("incorrect usage of keepOriginalText - may only be called as a parse action")
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException("incorrect usage of getTokensEndLoc - may only be called from within a parse action")
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
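# A minimal makeHTMLTags sketch (illustrative): attribute values are exposed
# as named results on the start-tag match.
#   >>> img_start, img_end = makeHTMLTags("img")
#   >>> img_start.parseString('<img src="smile.png"/>').src
#   'smile.png'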
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
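# A minimal withAttribute sketch (illustrative): qualify a start tag so that
# only tags carrying align="right" match; others raise ParseException and are
# skipped by scanString.
#   >>> td_start, td_end = makeHTMLTags("td")
#   >>> right_td = td_start.copy().setParseAction(withAttribute(align="right"))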
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=Suppress('('))
- rpar - expression for matching right-parentheses (default=Suppress(')'))
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
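# A minimal infixNotation sketch (illustrative), a small arithmetic grammar
# with the usual precedence of '*' and '/' over '+' and '-':
#   >>> integer = Word(nums).setParseAction(lambda t: int(t[0]))
#   >>> arith = infixNotation(integer,
#   ...     [(oneOf('* /'), 2, opAssoc.LEFT), (oneOf('+ -'), 2, opAssoc.LEFT)])
#   >>> arith.parseString("1+2*3").asList()
#   [[1, '+', [2, '*', 3]]]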
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
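# A minimal nestedExpr sketch (illustrative): whitespace-delimited content
# between the default '(' and ')' delimiters comes back as nested lists.
#   >>> nestedExpr().parseString("(a (b c) d)").asList()
#   [['a', ['b', 'c'], 'd']]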
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
       - indent - boolean indicating whether block must be indented beyond the
         current level; set to False for a block of left-most statements
(default=True)
A valid block must contain at least one C{blockStatement}.
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = Empty() + Empty().setParseAction(checkSubIndent)
PEER = Empty().setParseAction(checkPeerIndent)
UNDENT = Empty().setParseAction(checkUnindent)
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";").streamline()
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),'><& "'))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (teststring + "->" + str(tokenlist))
print ("tokens = " + str(tokens))
print ("tokens.columns = " + str(tokens.columns))
print ("tokens.tables = " + str(tokens.tables))
print (tokens.asXML("SQL",True))
except ParseBaseException as err:
print (teststring + "->")
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| mit | 5,709,935,698,927,364,000 | 40.015742 | 196 | 0.571507 | false |
quizlet/grpc | src/python/grpcio_tests/tests/testing/_time_test.py | 12 | 6050 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import threading
import time
import unittest
import grpc_testing
_QUANTUM = 0.3
_MANY = 10000
# Tests that run in real time can either wait for the scheduler to
# eventually run what needs to be run (and risk timing out) or declare
# that the scheduler didn't schedule work reasonably fast enough. We
# choose the latter for this test.
_PATHOLOGICAL_SCHEDULING = 'pathological thread scheduling!'
class _TimeNoter(object):
def __init__(self, time):
self._condition = threading.Condition()
self._time = time
self._call_times = []
def __call__(self):
with self._condition:
self._call_times.append(self._time.time())
def call_times(self):
with self._condition:
return tuple(self._call_times)
class TimeTest(object):
def test_sleep_for(self):
start_time = self._time.time()
self._time.sleep_for(_QUANTUM)
end_time = self._time.time()
self.assertLessEqual(start_time + _QUANTUM, end_time)
def test_sleep_until(self):
start_time = self._time.time()
self._time.sleep_until(start_time + _QUANTUM)
end_time = self._time.time()
self.assertLessEqual(start_time + _QUANTUM, end_time)
def test_call_in(self):
time_noter = _TimeNoter(self._time)
start_time = self._time.time()
self._time.call_in(time_noter, _QUANTUM)
self._time.sleep_for(_QUANTUM * 2)
call_times = time_noter.call_times()
self.assertTrue(call_times, msg=_PATHOLOGICAL_SCHEDULING)
self.assertLessEqual(start_time + _QUANTUM, call_times[0])
def test_call_at(self):
time_noter = _TimeNoter(self._time)
start_time = self._time.time()
self._time.call_at(time_noter, self._time.time() + _QUANTUM)
self._time.sleep_for(_QUANTUM * 2)
call_times = time_noter.call_times()
self.assertTrue(call_times, msg=_PATHOLOGICAL_SCHEDULING)
self.assertLessEqual(start_time + _QUANTUM, call_times[0])
def test_cancel(self):
time_noter = _TimeNoter(self._time)
future = self._time.call_in(time_noter, _QUANTUM * 2)
self._time.sleep_for(_QUANTUM)
cancelled = future.cancel()
self._time.sleep_for(_QUANTUM * 2)
call_times = time_noter.call_times()
self.assertFalse(call_times, msg=_PATHOLOGICAL_SCHEDULING)
self.assertTrue(cancelled)
self.assertTrue(future.cancelled())
def test_many(self):
test_events = tuple(threading.Event() for _ in range(_MANY))
possibly_cancelled_futures = {}
background_noise_futures = []
for test_event in test_events:
possibly_cancelled_futures[test_event] = self._time.call_in(
test_event.set, _QUANTUM * (2 + random.random()))
for _ in range(_MANY):
background_noise_futures.append(
self._time.call_in(threading.Event().set, _QUANTUM * 1000 *
random.random()))
self._time.sleep_for(_QUANTUM)
cancelled = set()
for test_event, test_future in possibly_cancelled_futures.items():
if bool(random.randint(0, 1)) and test_future.cancel():
cancelled.add(test_event)
self._time.sleep_for(_QUANTUM * 3)
for test_event in test_events:
(self.assertFalse if test_event in cancelled else
self.assertTrue)(test_event.is_set())
for background_noise_future in background_noise_futures:
background_noise_future.cancel()
def test_same_behavior_used_several_times(self):
time_noter = _TimeNoter(self._time)
start_time = self._time.time()
first_future_at_one = self._time.call_in(time_noter, _QUANTUM)
second_future_at_one = self._time.call_in(time_noter, _QUANTUM)
first_future_at_three = self._time.call_in(time_noter, _QUANTUM * 3)
second_future_at_three = self._time.call_in(time_noter, _QUANTUM * 3)
self._time.sleep_for(_QUANTUM * 2)
first_future_at_one_cancelled = first_future_at_one.cancel()
second_future_at_one_cancelled = second_future_at_one.cancel()
first_future_at_three_cancelled = first_future_at_three.cancel()
self._time.sleep_for(_QUANTUM * 2)
second_future_at_three_cancelled = second_future_at_three.cancel()
first_future_at_three_cancelled_again = first_future_at_three.cancel()
call_times = time_noter.call_times()
self.assertEqual(3, len(call_times), msg=_PATHOLOGICAL_SCHEDULING)
self.assertFalse(first_future_at_one_cancelled)
self.assertFalse(second_future_at_one_cancelled)
self.assertTrue(first_future_at_three_cancelled)
self.assertFalse(second_future_at_three_cancelled)
self.assertTrue(first_future_at_three_cancelled_again)
self.assertLessEqual(start_time + _QUANTUM, call_times[0])
self.assertLessEqual(start_time + _QUANTUM, call_times[1])
self.assertLessEqual(start_time + _QUANTUM * 3, call_times[2])
class StrictRealTimeTest(TimeTest, unittest.TestCase):
def setUp(self):
self._time = grpc_testing.strict_real_time()
class StrictFakeTimeTest(TimeTest, unittest.TestCase):
def setUp(self):
self._time = grpc_testing.strict_fake_time(
random.randint(0, int(time.time())))
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -4,169,475,018,417,231,400 | 35.666667 | 78 | 0.64595 | false |
google/nitroml | nitroml/benchmark/suites/openml_cc18_test.py | 1 | 1850 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Tests for nitroml.suites.openml_cc18."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from nitroml.benchmark.suites import openml_cc18
from nitroml.benchmark.suites import testing_utils
import requests_mock
class OpenMLCC18Test(parameterized.TestCase, absltest.TestCase):
"""Test cases for datasets.openML_datasets provider.
There are two test cases:
1) Test Case 1: Downloads mock data for openML CC18 datasets.
2) Test Case 2: Checks the cache, if data exists loads from the disk, else
downloads.
"""
@parameterized.named_parameters(
{
'testcase_name': 'openML_default',
'use_cache': False,
}, {
'testcase_name': 'openML_use-cache',
'use_cache': True,
})
def test_examples(self, use_cache):
root_dir = os.path.join(absltest.get_default_test_tmpdir(),
'openML_mock_data')
with requests_mock.Mocker() as mocker:
testing_utils.register_mock_urls(mocker)
suite = openml_cc18.OpenMLCC18(root_dir, use_cache, mock_data=True)
self.assertNotEmpty(list(suite))
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -3,053,389,581,779,941,400 | 32.035714 | 79 | 0.672432 | false |
artemh/asuswrt-merlin | release/src/router/samba36/wintest/test-s3.py | 19 | 9730 | #!/usr/bin/env python
'''automated testing of Samba3 against windows'''
import sys, os
import optparse
import wintest
def set_libpath(t):
t.putenv("LD_LIBRARY_PATH", "${PREFIX}/lib")
def set_krb5_conf(t):
t.run_cmd("mkdir -p ${PREFIX}/etc")
t.write_file("${PREFIX}/etc/krb5.conf",
'''[libdefaults]
dns_lookup_realm = false
dns_lookup_kdc = true''')
t.putenv("KRB5_CONFIG", '${PREFIX}/etc/krb5.conf')
def build_s3(t):
'''build samba3'''
t.info('Building s3')
t.chdir('${SOURCETREE}/source3')
t.putenv('CC', 'ccache gcc')
t.run_cmd("./autogen.sh")
t.run_cmd("./configure -C --prefix=${PREFIX} --enable-developer")
t.run_cmd('make basics')
t.run_cmd('make -j4')
t.run_cmd('rm -rf ${PREFIX}')
t.run_cmd('make install')
def start_s3(t):
t.info('Starting Samba3')
t.chdir("${PREFIX}")
t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
t.run_cmd("rm -f var/locks/*.pid")
t.run_cmd(['sbin/nmbd', "-D"])
t.run_cmd(['sbin/winbindd', "-D"])
t.run_cmd(['sbin/smbd', "-D"])
t.port_wait("${INTERFACE_IP}", 139)
def test_wbinfo(t):
t.info('Testing wbinfo')
t.chdir('${PREFIX}')
t.cmd_contains("bin/wbinfo --version", ["Version 3."])
t.cmd_contains("bin/wbinfo -p", ["Ping to winbindd succeeded"])
t.retry_cmd("bin/wbinfo --online-status",
["BUILTIN : online",
"${HOSTNAME} : online",
"${WIN_DOMAIN} : online"],
casefold=True)
t.cmd_contains("bin/wbinfo -u",
["${WIN_DOMAIN}/administrator",
"${WIN_DOMAIN}/krbtgt" ],
casefold=True)
t.cmd_contains("bin/wbinfo -g",
["${WIN_DOMAIN}/domain users",
"${WIN_DOMAIN}/domain guests",
"${WIN_DOMAIN}/domain admins"],
casefold=True)
t.cmd_contains("bin/wbinfo --name-to-sid administrator",
"S-1-5-.*-500 SID_USER .1",
regex=True)
t.cmd_contains("bin/wbinfo --name-to-sid 'domain users'",
"S-1-5-.*-513 SID_DOM_GROUP .2",
regex=True)
t.retry_cmd("bin/wbinfo --authenticate=${WIN_DOMAIN}/administrator%${WIN_PASS}",
["plaintext password authentication succeeded",
"challenge/response password authentication succeeded"])
def test_smbclient(t):
t.info('Testing smbclient')
t.chdir('${PREFIX}')
t.cmd_contains("bin/smbclient --version", ["Version 3."])
t.cmd_contains('bin/smbclient -L ${INTERFACE_IP} -U%', ["Domain=[${WIN_DOMAIN}]", "test", "IPC$", "Samba 3."],
casefold=True)
child = t.pexpect_spawn('bin/smbclient //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%${PASSWORD2}')
child.expect("smb:")
child.sendline("dir")
child.expect("blocks available")
child.sendline("mkdir testdir")
child.expect("smb:")
child.sendline("cd testdir")
child.expect('testdir')
child.sendline("cd ..")
child.sendline("rmdir testdir")
child = t.pexpect_spawn('bin/smbclient //${HOSTNAME}.${WIN_REALM}/test -Uroot@${WIN_REALM}%${PASSWORD2} -k')
child.expect("smb:")
child.sendline("dir")
child.expect("blocks available")
child.sendline("mkdir testdir")
child.expect("smb:")
child.sendline("cd testdir")
child.expect('testdir')
child.sendline("cd ..")
child.sendline("rmdir testdir")
def create_shares(t):
t.info("Adding test shares")
t.chdir('${PREFIX}')
t.write_file("lib/smb.conf", '''
[test]
path = ${PREFIX}/test
read only = no
''',
mode='a')
t.run_cmd("mkdir -p test")
def prep_join_as_member(t, vm):
'''prepare to join a windows domain as a member server'''
t.setwinvars(vm)
t.info("Starting VMs for joining ${WIN_VM} as a member using net ads join")
t.chdir('${PREFIX}')
t.run_cmd('killall -9 -q samba smbd nmbd winbindd', checkfail=False)
t.vm_poweroff("${WIN_VM}", checkfail=False)
t.vm_restore("${WIN_VM}", "${WIN_SNAPSHOT}")
child = t.open_telnet("${WIN_HOSTNAME}", "administrator", "${WIN_PASS}", set_time=True)
t.get_ipconfig(child)
t.del_files(["var", "private"])
t.write_file("lib/smb.conf", '''
[global]
netbios name = ${HOSTNAME}
log level = ${DEBUGLEVEL}
realm = ${WIN_REALM}
workgroup = ${WIN_DOMAIN}
security = ADS
bind interfaces only = yes
interfaces = ${INTERFACE}
winbind separator = /
idmap uid = 1000000-2000000
idmap gid = 1000000-2000000
winbind enum users = yes
winbind enum groups = yes
max protocol = SMB2
map hidden = no
map system = no
ea support = yes
panic action = xterm -e gdb --pid %d
''')
def join_as_member(t, vm):
'''join a windows domain as a member server'''
t.setwinvars(vm)
t.info("Joining ${WIN_VM} as a member using net ads join")
t.port_wait("${WIN_IP}", 389)
t.retry_cmd("host -t SRV _ldap._tcp.${WIN_REALM} ${WIN_IP}", ['has SRV record'] )
t.cmd_contains("bin/net ads join -Uadministrator%${WIN_PASS}", ["Joined"])
t.cmd_contains("bin/net ads testjoin", ["Join is OK"])
t.cmd_contains("bin/net ads dns register ${HOSTNAME}.${WIN_REALM} -P", ["Successfully registered hostname with DNS"])
t.cmd_contains("host -t A ${HOSTNAME}.${WIN_REALM}",
['${HOSTNAME}.${WIN_REALM} has address'])
def test_join_as_member(t, vm):
'''test the domain join'''
t.setwinvars(vm)
t.info('Testing join as member')
t.chdir('${PREFIX}')
t.run_cmd('bin/net ads user add root -Uadministrator%${WIN_PASS}')
child = t.pexpect_spawn('bin/net ads password root -Uadministrator%${WIN_PASS}')
child.expect("Enter new password for root")
child.sendline("${PASSWORD2}")
child.expect("Password change for ");
child.expect(" completed")
child = t.pexpect_spawn('bin/net rpc shell -S ${WIN_HOSTNAME}.${WIN_REALM} -Uadministrator%${WIN_PASS}')
child.expect("net rpc>")
child.sendline("user edit disabled root no")
child.expect("Set root's disabled flag")
test_wbinfo(t)
test_smbclient(t)
def test_s3(t):
'''basic s3 testing'''
t.setvar("SAMBA_VERSION", "Version 3")
t.check_prerequesites()
set_libpath(t)
if not t.skip("configure_bind"):
t.configure_bind()
if not t.skip("stop_bind"):
t.stop_bind()
if not t.skip("stop_vms"):
t.stop_vms()
if not t.skip("build"):
build_s3(t)
set_krb5_conf(t)
if not t.skip("configure_bind2"):
t.configure_bind()
if not t.skip("start_bind"):
t.start_bind()
dc_started = False
if t.have_var('W2K8R2A_VM') and not t.skip("join_w2k8r2"):
t.start_winvm('W2K8R2A')
dc_started = True
prep_join_as_member(t, "W2K8R2A")
t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
join_as_member(t, "W2K8R2A")
create_shares(t)
start_s3(t)
test_join_as_member(t, "W2K8R2A")
if t.have_var('WINDOWS7_VM') and t.have_var('W2K8R2A_VM') and not t.skip("join_windows7_2008r2"):
if not dc_started:
t.start_winvm('W2K8R2A')
t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
dc_started = True
else:
t.setwinvars('W2K8R2A')
realm = t.getvar("WIN_REALM")
dom_username = t.getvar("WIN_USER")
dom_password = t.getvar("WIN_PASS")
dom_realm = t.getvar("WIN_REALM")
t.start_winvm('WINDOWS7')
t.test_remote_smbclient("WINDOWS7")
t.run_winjoin('WINDOWS7', realm, username=dom_username, password=dom_password)
t.test_remote_smbclient("WINDOWS7", dom_username, dom_password)
t.test_remote_smbclient('WINDOWS7', dom_username, dom_password, args='--option=clientntlmv2auth=no')
t.test_remote_smbclient('WINDOWS7', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k")
t.test_remote_smbclient('WINDOWS7', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k --option=clientusespnegoprincipal=yes")
if t.have_var('WINXP_VM') and t.have_var('W2K8R2A_VM') and not t.skip("join_winxp_2008r2"):
if not dc_started:
t.start_winvm('W2K8R2A')
t.run_dcpromo_as_first_dc("W2K8R2A", func_level='2008r2')
dc_started = True
else:
t.setwinvars('W2K8R2A')
realm = t.getvar("WIN_REALM")
dom_username = t.getvar("WIN_USER")
dom_password = t.getvar("WIN_PASS")
dom_realm = t.getvar("WIN_REALM")
t.start_winvm('WINXP')
t.run_winjoin('WINXP', realm, username=dom_username, password=dom_password)
t.test_remote_smbclient('WINXP', dom_username, dom_password)
t.test_remote_smbclient('WINXP', dom_username, dom_password, args='--option=clientntlmv2auth=no')
t.test_remote_smbclient('WINXP', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k")
t.test_remote_smbclient('WINXP', "%s@%s" % (dom_username, dom_realm), dom_password, args="-k --clientusespnegoprincipal=yes")
t.info("S3 test: All OK")
def test_cleanup(t):
'''cleanup after tests'''
t.info("Cleaning up ...")
t.restore_resolv_conf()
if getattr(t, 'bind_child', False):
t.bind_child.kill()
if __name__ == '__main__':
t = wintest.wintest()
t.setup("test-s3.py", "source3")
try:
test_s3(t)
except:
if not t.opts.nocleanup:
test_cleanup(t)
raise
if not t.opts.nocleanup:
test_cleanup(t)
t.info("S3 test: All OK")
| gpl-2.0 | 6,256,183,168,282,289,000 | 34.381818 | 143 | 0.584995 | false |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/_strptime.py | 11 | 18819 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
from thread import allocate_lock as _thread_allocate_lock
except ImportError:
from dummy_thread import allocate_lock as _thread_allocate_lock
__author__ = "Brett Cannon"
__email__ = "[email protected]"
__all__ = ['strptime']
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
        The locale language is set at the onset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
        The only other possible issue is if someone changed the timezone and did
        not call time.tzset(). That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
def __pad(self, seq, front):
        # Add '' to either the front of seq (if front is true) or the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (01,22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
        # overloaded numbers is minimized.  The order in which the format
        # string is searched for values is very important; it eliminates
        # possible ambiguity about what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == timetzname[1]
# and time.daylight; handle that in strptime .
try:
time.tzset()
except AttributeError:
pass
no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
if time.daylight:
has_saving = frozenset([time.tzname[1].lower()])
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
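# A minimal LocaleTime sketch (illustrative; the concrete values depend on
# the active LC_TIME locale):
#   >>> lt = LocaleTime()
#   >>> lt.a_weekday[0]     # 'mon' in an English locale
#   >>> lt.LC_date          # e.g. '%m/%d/%y' in the C locale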
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super(TimeRE, self)
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
        prevents the possibility of a match occurring for a value that is also
        a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile('\s+')
format = whitespace_replacement.sub('\s*', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
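# A minimal TimeRE sketch (illustrative): format directives become named
# groups in the generated regex.
#   >>> TimeRE().compile('%Y-%m-%d').match('1999-03-17').group('m')
#   '03'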
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the format string."""
global _TimeRE_cache, _regex_cache
_cache_lock.acquire()
try:
time_re = _TimeRE_cache
locale_time = time_re.locale_time
if _getlang() != locale_time.lang:
_TimeRE_cache = TimeRE()
_regex_cache = {}
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
format_regex = _regex_cache.get(format)
if not format_regex:
format_regex = time_re.compile(format)
_regex_cache[format] = format_regex
finally:
_cache_lock.release()
found = format_regex.match(data_string)
if not found:
raise ValueError("time data did not match format: data=%s fmt=%s" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = 1900
month = day = 1
hour = minute = second = 0
tz = -1
    # Default to -1 to signify that the values are not known; not critical
    # to have, though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to -1 so as to signal need to calculate
# values
weekday = julian = -1
found_dict = found.groupdict()
for group_key in found_dict.iterkeys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday
week_of_year_start = 6
else:
# W starts week on Monday
week_of_year_start = 0
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year
# Calculations below assume 0 is a Monday
if julian == -1 and week_of_year != -1 and weekday != -1:
# Calculate how many days in week 0
first_weekday = datetime_date(year, 1, 1).weekday()
        preceding_days = 7 - first_weekday
        if preceding_days == 7:
            preceding_days = 0
# Adjust for U directive so that calculations are not dependent on
# directive used to figure out week of year
if weekday == 6 and week_of_year_start == 6:
week_of_year -= 1
# If a year starts and ends on a Monday but a week is specified to
# start on a Sunday we need to up the week to counter-balance the fact
# that with %W that first Monday starts week 1 while with %U that is
# week 0 and thus shifts everything by a week
if weekday == 0 and first_weekday == 0 and week_of_year_start == 6:
week_of_year += 1
# If in week 0, then just figure out how many days from Jan 1 to day of
# week specified, else calculate by multiplying week of year by 7,
# adding in days in week 0, and the number of days from Monday to the
# day of the week
if week_of_year == 0:
julian = 1 + weekday - first_weekday
else:
            days_to_week = preceding_days + (7 * (week_of_year - 1))
julian = 1 + days_to_week + weekday
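        # Worked example (added note): parsing "1900 02 1" with "%Y %W %w"
        # gives week_of_year=2 and weekday=0 (Monday). 1900-01-01 was a
        # Monday, so first_weekday=0 and preceding_days=0; days_to_week
        # = 0 + 7*(2-1) = 7 and julian = 1 + 7 + 0 = 8, i.e. 1900-01-08.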
    # Cannot pre-calculate datetime_date() since it can change in the Julian
    # calculation and thus could have a different value for the
    # day-of-the-week calculation
if julian == -1:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
    else:
        # Assume that if they bothered to include the Julian day it will
        # be accurate
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday == -1:
weekday = datetime_date(year, month, day).weekday()
return time.struct_time((year, month, day,
hour, minute, second,
weekday, julian, tz))
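# Usage sketch (added for illustration; assumes an English/C locale):
#
#     >>> strptime("Mon Jan  8 00:00:00 1900")[:6]
#     (1900, 1, 8, 0, 0, 0)
#
# The default format mirrors time.asctime() output; anything the regex
# cannot consume raises the ValueError shown above.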
| gpl-2.0 | 1,180,111,993,181,234,700 | 42.064073 | 105 | 0.548541 | false |
azurestandard/django | tests/regressiontests/comment_tests/tests/app_api_tests.py | 50 | 2600 | from __future__ import absolute_import
from django.conf import settings
from django.contrib import comments
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from . import CommentTestCase
class CommentAppAPITests(CommentTestCase):
"""Tests for the "comment app" API"""
def testGetCommentApp(self):
self.assertEqual(comments.get_comment_app(), comments)
def testGetForm(self):
self.assertEqual(comments.get_form(), CommentForm)
def testGetFormTarget(self):
self.assertEqual(comments.get_form_target(), "/post/")
def testGetFlagURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_flag_url(c), "/flag/12345/")
    def testGetDeleteURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_delete_url(c), "/delete/12345/")
    def testGetApproveURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
class CustomCommentTest(CommentTestCase):
urls = 'regressiontests.comment_tests.urls'
def setUp(self):
self.old_comments_app = getattr(settings, 'COMMENTS_APP', None)
settings.COMMENTS_APP = 'regressiontests.comment_tests.custom_comments'
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) + [settings.COMMENTS_APP,]
def tearDown(self):
del settings.INSTALLED_APPS[-1]
settings.COMMENTS_APP = self.old_comments_app
if settings.COMMENTS_APP is None:
del settings._wrapped.COMMENTS_APP
def testGetCommentApp(self):
from regressiontests.comment_tests import custom_comments
self.assertEqual(comments.get_comment_app(), custom_comments)
def testGetModel(self):
from regressiontests.comment_tests.custom_comments.models import CustomComment
self.assertEqual(comments.get_model(), CustomComment)
def testGetForm(self):
from regressiontests.comment_tests.custom_comments.forms import CustomCommentForm
self.assertEqual(comments.get_form(), CustomCommentForm)
def testGetFormTarget(self):
self.assertEqual(comments.get_form_target(), "/post/")
def testGetFlagURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_flag_url(c), "/flag/12345/")
    def testGetDeleteURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_delete_url(c), "/delete/12345/")
    def testGetApproveURL(self):
c = Comment(id=12345)
self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
| bsd-3-clause | 4,304,082,047,380,734,500 | 33.666667 | 90 | 0.695385 | false |
CTSNE/NodeDefender | NodeDefender/mail/node.py | 1 | 1136 | from flask_mail import Message
from flask import render_template, url_for
import NodeDefender
import smtplib
@NodeDefender.decorators.mail_enabled
@NodeDefender.decorators.celery_task
def new_node(group, node):
group = NodeDefender.db.group.get(group)
if group is None:
return False
if group.email is None:
return False
node = NodeDefender.db.node.get(node)
if node is None:
return False
msg = Message('Node added to {}'.format(group.name), sender='[email protected]',
recipients=[group.email])
    url = url_for('node_view.nodes_node', name=NodeDefender.serializer.dumps(node.name))
    msg.body = render_template('mail/node/new_node.txt', node=node, url=url)
try:
NodeDefender.mail.mail.send(msg)
except smtplib.SMTPRecipientsRefused:
NodeDefender.mail.logger.error("Unable to send email to: {}".\
format(group.email))
except smtplib.SMTPAuthenticationError:
NodeDefender.mail.logger.error("Authentication error when sending email")
return True
| mit | 1,991,880,481,027,996,200 | 35.645161 | 91 | 0.659331 | false |
kevin-coder/tensorflow-fork | tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver.py | 8 | 6532 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Kubernetes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export
_KUBERNETES_API_CLIENT_INSTALLED = True
try:
from kubernetes import client as k8sclient # pylint: disable=g-import-not-at-top
from kubernetes import config as k8sconfig # pylint: disable=g-import-not-at-top
except ImportError:
_KUBERNETES_API_CLIENT_INSTALLED = False
@tf_export('distribute.cluster_resolver.KubernetesClusterResolver')
class KubernetesClusterResolver(ClusterResolver):
"""Cluster Resolver for Kubernetes.
  This is an implementation of cluster resolvers for Kubernetes. When given
  the Kubernetes namespace and label selector for pods, we will retrieve the
pod IP addresses of all running pods matching the selector, and return a
ClusterSpec based on that information.
"""
def __init__(self,
job_to_label_mapping=None,
tf_server_port=8470,
rpc_layer='grpc',
override_client=None):
"""Initializes a new KubernetesClusterResolver.
This initializes a new Kubernetes Cluster Resolver. The Cluster Resolver
will attempt to talk to the Kubernetes master to retrieve all the instances
of pods matching a label selector.
Args:
job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.
This allows users to specify many TensorFlow jobs in one Cluster
Resolver, and each job can have pods belong with different label
selectors. For example, a sample mapping might be
```
{'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],
'ps': ['job-name=ps-1', 'job-name=ps-2']}
```
tf_server_port: The port the TensorFlow server is listening on.
rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate
between tasks in Kubernetes. Defaults to 'grpc'.
override_client: The Kubernetes client (usually automatically retrieved
using `from kubernetes import client as k8sclient`). If you pass this
in, you are responsible for setting Kubernetes credentials manually.
Raises:
ImportError: If the Kubernetes Python client is not installed and no
`override_client` is passed in.
"""
if _KUBERNETES_API_CLIENT_INSTALLED:
k8sconfig.load_kube_config()
if not job_to_label_mapping:
job_to_label_mapping = {'worker': ['job-name=tensorflow']}
if not override_client and not _KUBERNETES_API_CLIENT_INSTALLED:
      raise ImportError('The Kubernetes Python client must be installed '
                        'before using the Kubernetes Cluster Resolver. To '
                        'install the Kubernetes Python client, run `pip '
                        'install kubernetes` on your command line.')
self._job_to_label_mapping = job_to_label_mapping
self._tf_server_port = tf_server_port
self._override_client = override_client
self.task_type = None
self.task_id = None
self.rpc_layer = rpc_layer
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Returns the master address to use when creating a session.
You must have set the task_type and task_id object properties before
calling this function, or pass in the `task_type` and `task_id`
parameters when using this function. If you do both, the function parameters
will override the object properties.
Args:
task_type: (Optional) The type of the TensorFlow task of the master.
task_id: (Optional) The index of the TensorFlow task of the master.
rpc_layer: (Optional) The RPC protocol for the given cluster.
Returns:
The name or URL of the session master.
"""
task_type = task_type if task_type is not None else self.task_type
task_id = task_id if task_id is not None else self.task_id
if task_type is not None and task_id is not None:
return format_master_url(
self.cluster_spec().task_address(task_type, task_id),
rpc_layer or self.rpc_layer)
return ''
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest info from Kubernetes.
We retrieve the information from the Kubernetes master every time this
method is called.
Returns:
A ClusterSpec containing host information returned from Kubernetes.
Raises:
RuntimeError: If any of the pods returned by the master is not in the
`Running` phase.
"""
if not self._override_client:
k8sconfig.load_kube_config()
client = self._override_client or k8sclient.CoreV1Api()
cluster_map = {}
for tf_job in self._job_to_label_mapping:
all_pods = []
for selector in self._job_to_label_mapping[tf_job]:
ret = client.list_pod_for_all_namespaces(label_selector=selector)
selected_pods = []
# Sort the list by the name to make sure it doesn't change call to call.
for pod in sorted(ret.items, key=lambda x: x.metadata.name):
if pod.status.phase == 'Running':
selected_pods.append(
'%s:%s' % (pod.status.host_ip, self._tf_server_port))
else:
raise RuntimeError('Pod "%s" is not running; phase: "%s"' %
(pod.metadata.name, pod.status.phase))
all_pods.extend(selected_pods)
cluster_map[tf_job] = all_pods
return server_lib.ClusterSpec(cluster_map)
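# Usage sketch (added for illustration; the selector and port below are
# example values, not requirements of this module):
#
#   resolver = KubernetesClusterResolver(
#       job_to_label_mapping={'worker': ['job-name=tensorflow']},
#       tf_server_port=8470)
#   spec = resolver.cluster_spec()          # queries the Kubernetes master
#   master = resolver.master(task_type='worker', task_id=0)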
| apache-2.0 | 5,517,690,364,646,745,000 | 40.341772 | 92 | 0.684783 | false |
foxwill/ol-api-tester | env/lib/python2.7/site-packages/setuptools/tests/test_find_packages.py | 43 | 6005 | """Tests for setuptools.find_packages()."""
import os
import sys
import shutil
import tempfile
import unittest
import platform
import setuptools
from setuptools import find_packages
from setuptools.tests.py26compat import skipIf
find_420_packages = setuptools.PEP420PackageFinder.find
# modeled after CPython's test.support.can_symlink
def can_symlink():
TESTFN = tempfile.mktemp()
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
globals().update(can_symlink=lambda: can)
return can
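# Note (added for illustration): can_symlink memoizes itself by rebinding the
# module-level name after the first probe, so the filesystem is touched only
# once:
#
#   can_symlink()   # creates/removes a real symlink, then rebinds the name
#   can_symlink()   # now just `lambda: can` -- no further I/O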
def has_symlink():
bad_symlink = (
# Windows symlink directory detection is broken on Python 3.2
platform.system() == 'Windows' and sys.version_info[:2] == (3,2)
)
return can_symlink() and not bad_symlink
class TestFindPackages(unittest.TestCase):
def setUp(self):
self.dist_dir = tempfile.mkdtemp()
self._make_pkg_structure()
def tearDown(self):
shutil.rmtree(self.dist_dir)
def _make_pkg_structure(self):
"""Make basic package structure.
dist/
docs/
conf.py
pkg/
__pycache__/
nspkg/
mod.py
subpkg/
assets/
asset
__init__.py
setup.py
"""
self.docs_dir = self._mkdir('docs', self.dist_dir)
self._touch('conf.py', self.docs_dir)
self.pkg_dir = self._mkdir('pkg', self.dist_dir)
self._mkdir('__pycache__', self.pkg_dir)
self.ns_pkg_dir = self._mkdir('nspkg', self.pkg_dir)
self._touch('mod.py', self.ns_pkg_dir)
self.sub_pkg_dir = self._mkdir('subpkg', self.pkg_dir)
self.asset_dir = self._mkdir('assets', self.sub_pkg_dir)
self._touch('asset', self.asset_dir)
self._touch('__init__.py', self.sub_pkg_dir)
self._touch('setup.py', self.dist_dir)
def _mkdir(self, path, parent_dir=None):
if parent_dir:
path = os.path.join(parent_dir, path)
os.mkdir(path)
return path
def _touch(self, path, dir_=None):
if dir_:
path = os.path.join(dir_, path)
fp = open(path, 'w')
fp.close()
return path
def test_regular_package(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir)
self.assertEqual(packages, ['pkg', 'pkg.subpkg'])
def test_exclude(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir, exclude=('pkg.*',))
assert packages == ['pkg']
def test_include_excludes_other(self):
"""
If include is specified, other packages should be excluded.
"""
self._touch('__init__.py', self.pkg_dir)
alt_dir = self._mkdir('other_pkg', self.dist_dir)
self._touch('__init__.py', alt_dir)
packages = find_packages(self.dist_dir, include=['other_pkg'])
self.assertEqual(packages, ['other_pkg'])
def test_dir_with_dot_is_skipped(self):
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
data_dir = self._mkdir('some.data', self.pkg_dir)
self._touch('__init__.py', data_dir)
self._touch('file.dat', data_dir)
packages = find_packages(self.dist_dir)
self.assertTrue('pkg.some.data' not in packages)
def test_dir_with_packages_in_subdir_is_excluded(self):
"""
Ensure that a package in a non-package such as build/pkg/__init__.py
is excluded.
"""
build_dir = self._mkdir('build', self.dist_dir)
build_pkg_dir = self._mkdir('pkg', build_dir)
self._touch('__init__.py', build_pkg_dir)
packages = find_packages(self.dist_dir)
self.assertTrue('build.pkg' not in packages)
@skipIf(not has_symlink(), 'Symlink support required')
def test_symlinked_packages_are_included(self):
"""
A symbolically-linked directory should be treated like any other
directory when matched as a package.
Create a link from lpkg -> pkg.
"""
self._touch('__init__.py', self.pkg_dir)
linked_pkg = os.path.join(self.dist_dir, 'lpkg')
os.symlink('pkg', linked_pkg)
assert os.path.isdir(linked_pkg)
packages = find_packages(self.dist_dir)
self.assertTrue('lpkg' in packages)
def _assert_packages(self, actual, expected):
self.assertEqual(set(actual), set(expected))
def test_pep420_ns_package(self):
packages = find_420_packages(
self.dist_dir, include=['pkg*'], exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes(self):
packages = find_420_packages(
self.dist_dir, exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes_or_excludes(self):
packages = find_420_packages(self.dist_dir)
expected = [
'docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg', 'pkg.subpkg.assets']
self._assert_packages(packages, expected)
def test_regular_package_with_nested_pep420_ns_packages(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_420_packages(
self.dist_dir, exclude=['docs', 'pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_non_package_dirs(self):
shutil.rmtree(self.docs_dir)
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
packages = find_420_packages(self.dist_dir)
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
| gpl-2.0 | 6,998,133,970,633,741,000 | 34.323529 | 83 | 0.594338 | false |
Bergiu/smarthomepi | packages/shp/server/Program.py | 1 | 3915 | #
from datetime import datetime
class Program ( ):
#private:
"""
id # int
datetime # datetime
description # string
status # boolean
inactive # boolean
weekly # boolean
pin # int(4)
id_related_dev # int
id_initiator # int
"""
def __init__(self,**kwargs):
"""
@**kwargs:
id: int
datetime: datetime
description: string = ""
status: boolean
inactive: boolean = False
weekly: boolean = False
pin: int(4) = ""
id_related_dev: int
id_initiator: int
"""
err_func="User __init__: "
missing="User __init__: Missing "
if "id" in kwargs.keys():
self.id=int(kwargs["id"])
else:
raise ValueError(missing+"id")
if "datetime" in kwargs.keys():
if type(kwargs["datetime"]) is datetime:
self.datetime=kwargs["datetime"]
else:
raise ValueError(err_func+"The key datetime is not propper type(datetime): "+type(kwargs["datetime"]))
else:
raise ValueError(missing+"datetime")
if "description" in kwargs.keys():
self.description=str(kwargs["description"])
else:
self.description=""
if "status" in kwargs.keys():
self.status=bool(kwargs["status"])
else:
raise ValueError(missing+"status")
if "inactive" in kwargs.keys():
self.inactive=bool(kwargs["inactive"])
else:
self.inactive=False
if "weekly" in kwargs.keys():
self.weekly=bool(kwargs["weekly"])
else:
self.weekly=False
if "pin" in kwargs.keys():
if int(kwargs["pin"]) >= 0 and int(kwargs["pin"]) < 10000:
self.pin=int(kwargs["pin"])
else:
raise ValueError(err_func+"Pin must be between 0 and 10000")
else:
self.pin=""
if "id_related_dev" in kwargs.keys():
self.id_related_dev=int(kwargs["id_related_dev"])
else:
raise ValueError(missing+"id_related_dev")
if "id_initiator" in kwargs.keys():
self.id_initiator=int(kwargs["id_initiator"])
else:
raise ValueError(missing+"id_initiator")
#public:
def getId( self):
"""
@id:int
"""
return self.id
def getDatetime( self):
"""
@datetime:datetime
"""
return self.datetime
def setDatetime( self, date_time):
"""
@date_time:datetime
"""
if type(date_time) is datetime:
self.datetime=date_time
return True
else:
return False
def getDescription( self):
"""
@description:string
"""
return self.description
def setDescription( self, description):
"""
@description:string
"""
self.description=str(description)
return True
def getStatus( self):
"""
@status:boolean
"""
return self.status
def setStatus( self, status):
"""
@status:boolean
"""
self.status=bool(status)
return True
def toggleStatus( self):
"""
"""
# xor = ^
self.status=bool(self.status^1)
return True
def isInactive( self):
"""
@inactive:boolean
"""
return self.inactive
def setInactive( self, inactive):
"""
@inactive:boolean
"""
self.inactive=bool(inactive)
return True
def toggleInactive( self):
"""
"""
# xor = ^
self.inactive=bool(self.inactive^1)
return True
def isWeekly( self):
"""
@weekly:boolean
"""
return self.weekly
def setWeekly( self, weekly):
"""
@weekly:boolean
"""
self.weekly=bool(weekly)
return True
def toggleWeekly( self):
"""
"""
# xor = ^
self.weekly=bool(self.weekly^1)
return True
def checkPin( self, pin):
"""
@pin:int(4)
@validPin:boolean :
"""
if self.pin=="":
return True
elif type(pin) is str:
return False
elif self.pin == int(pin):
return True
else:
return False
def setPin( self, pin):
"""
@pin:int(4)
"""
if int(pin) >= 0 and int(pin) < 10000:
self.pin=int(pin)
return True
else:
return False
def removePin( self):
"""
"""
self.pin=""
return True
def getDevice( self):
"""
@device:Device
"""
return self.id_related_dev
def getUser( self):
"""
@user:User
"""
return self.id_initiator
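# Minimal usage sketch (added for illustration; all values are made up):
#
#     from datetime import datetime
#     prog = Program(id=1, datetime=datetime(2017, 1, 1, 12, 0), status=False,
#                    pin=1234, id_related_dev=2, id_initiator=3)
#     prog.toggleStatus()    # status is now True
#     prog.checkPin(1234)    # -> True
#     prog.checkPin("1234")  # -> False (string pins are rejected)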
| gpl-3.0 | -920,460,828,087,173,400 | 17.209302 | 106 | 0.62069 | false |
tereka114/chainer | tests/functions_tests/test_split_axis.py | 1 | 2359 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer.testing import attr
if cuda.available:
cuda.init()
class TestSplitAxis0(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(42, dtype=numpy.float32).reshape(2, 7, 3)
self.ys = [self.x[:, :2], self.x[:, 2:5], self.x[:, 5:]]
self.ys_section = [2, 5]
self.axis = 1
def check_forward(self, x_data, ys_data, indices_or_sections, axis):
x = chainer.Variable(x_data)
ys = functions.split_axis(x, indices_or_sections, axis)
for yd, y in zip(ys_data, ys):
gradient_check.assert_allclose(yd, y.data, atol=0, rtol=0)
def test_forward_cpu(self):
self.check_forward(self.x, self.ys, self.ys_section, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x),
[cuda.to_gpu(y.copy()) for y in self.ys],
self.ys_section, axis=self.axis)
def check_backward(self, x_data, indices_or_sections, axis):
x = chainer.Variable(x_data)
ys = functions.split_axis(x, indices_or_sections, axis)
for y in ys:
y.grad = y.data
ys[0].backward()
gradient_check.assert_allclose(x.data, x.grad, atol=0, rtol=0)
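        # Rationale (added note): split_axis only partitions elements, so its
        # backward pass concatenates the output gradients back into x's
        # shape; with y.grad = y.data above, x.grad is bitwise equal to
        # x.data, which is why atol=0 / rtol=0 is safe here.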
def test_backward_cpu(self):
self.check_backward(self.x, self.ys_section, axis=self.axis)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.ys_section, axis=self.axis)
class TestSplitAxis1(TestSplitAxis0):
def setUp(self):
self.x = numpy.arange(21, dtype=numpy.float32).reshape(7, 3)
self.ys = [self.x[:2], self.x[2:5], self.x[5:]]
self.ys_section = [2, 5]
self.axis = 0
class TestSplitAxis2(TestSplitAxis0):
def setUp(self):
self.x = numpy.arange(54, dtype=numpy.float32).reshape(2, 9, 3)
self.ys = [self.x[:, :3], self.x[:, 3:6], self.x[:, 6:]]
self.ys_section = 3
self.axis = 1
class TestSplitAxis3(TestSplitAxis0):
def setUp(self):
self.x = numpy.arange(36, dtype=numpy.float32).reshape(2, 6, 3)
self.ys = [self.x[:, :2], self.x[:, 2:4], self.x[:, 4:]]
self.ys_section = 3
self.axis = 1
| mit | -7,395,524,975,415,688,000 | 27.768293 | 72 | 0.59983 | false |
FinalAngel/djangocms-installer | tests/config.py | 3 | 25247 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from mock import patch
from six import StringIO, text_type
from tzlocal import get_localzone
from djangocms_installer import config
from djangocms_installer.install import check_install
from djangocms_installer.utils import less_than_version, supported_versions
from .base import BaseTestClass
class TestConfig(BaseTestClass):
def test_default_config(self):
conf_data = config.parse(['--db=postgres://user:pwd@host/dbname',
'-q', '-p'+self.project_dir, 'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.1)
self.assertEqual(conf_data.django_version, 1.7)
self.assertEqual(conf_data.i18n, 'yes')
self.assertEqual(conf_data.reversion, 'yes')
self.assertEqual(conf_data.permissions, 'yes')
self.assertEqual(conf_data.use_timezone, 'yes')
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.no_db_driver, False)
self.assertEqual(conf_data.no_deps, False)
self.assertEqual(conf_data.no_sync, False)
self.assertEqual(conf_data.plugins, False)
self.assertEqual(conf_data.requirements_file, None)
def test_cli_config(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.7',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.1)
self.assertEqual(conf_data.django_version, 1.7)
self.assertEqual(conf_data.i18n, 'no')
self.assertEqual(conf_data.reversion, 'no')
self.assertEqual(conf_data.permissions, 'no')
self.assertEqual(conf_data.use_timezone, 'no')
self.assertEqual(conf_data.timezone, 'Europe/Rome')
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
self.assertEqual(conf_data.project_directory, self.project_dir)
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.db_driver, 'psycopg2')
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.4',
'--cms-version=3.0',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.project_name, 'example_prj')
self.assertEqual(conf_data.cms_version, 3.0)
self.assertEqual(conf_data.django_version, 1.4)
self.assertEqual(conf_data.i18n, 'no')
self.assertEqual(conf_data.reversion, 'no')
self.assertEqual(conf_data.permissions, 'no')
self.assertEqual(conf_data.use_timezone, 'no')
self.assertEqual(conf_data.timezone, 'Europe/Rome')
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
self.assertEqual(conf_data.project_directory, self.project_dir)
self.assertEqual(conf_data.db, 'postgres://user:pwd@host/dbname')
self.assertEqual(conf_data.db_driver, 'psycopg2')
def test_version_mismatch(self):
with self.assertRaises(SystemExit):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=stable',
'--django-version=1.4',
'--i18n=no',
'--reversion=no',
'--permissions=no',
'--use-tz=no',
'-tEurope/Rome',
'-len', '-lde', '-lit',
'-p'+self.project_dir,
'example_prj'])
def test_cli_config_commaseparated_languages(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-len,de,it',
'-p'+self.project_dir,
'example_prj'
])
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
def test_cli_config_comma_languages_with_space(self):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-len , de , it',
'-p'+self.project_dir,
'example_prj'
])
self.assertEqual(conf_data.languages, ['en', 'de', 'it'])
def test_invalid_choices(self):
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=2.6',
'--django-version=1.1',
'--i18n=no',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(self.stderr.getvalue().find("--cms-version/-v: invalid choice: '2.6'") > -1)
def test_invalid_project_name(self):
with patch('sys.stdout', self.stdout):
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'test'])
self.assertTrue(stderr_tmp.getvalue().find("Project name 'test' is not a valid app name") > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'assert'])
self.assertTrue(stderr_tmp.getvalue().find("Project name 'assert' is not a valid app name") > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'values'])
self.assertTrue(stderr_tmp.getvalue().find("Project name 'values' is not a valid app name") > -1)
stderr_tmp = StringIO()
with patch('sys.stderr', stderr_tmp):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
'project-name'])
self.assertTrue(stderr_tmp.getvalue().find("Project name 'project-name' is not a valid app name") > -1)
def test_invalid_project_path(self):
prj_dir = 'example_prj'
existing_path = os.path.join(self.project_dir, prj_dir)
os.makedirs(existing_path)
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
prj_dir])
self.assertEqual(conf_data.project_path, existing_path)
self.assertTrue(self.stderr.getvalue().find("Path '%s' already exists and is not empty" % self.project_dir) > -1)
def test_invalid_project_dir(self):
prj_dir = 'example_prj'
existing_path = os.path.join(self.project_dir, 'a_file')
with open(existing_path, 'w') as f:
f.write('')
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
prj_dir])
self.assertEqual(conf_data.project_path, existing_path)
self.assertTrue(self.stderr.getvalue().find("Path '%s' already exists and is not empty" % self.project_dir) > -1)
def test_invalid_project_dir_skip(self):
prj_dir = 'example_prj'
existing_path = os.path.join(self.project_dir, 'a_file')
with open(existing_path, 'w') as f:
f.write('')
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
conf_data = config.parse([
'-q', '-s',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
prj_dir])
self.assertFalse(self.stderr.getvalue().find("Path '%s' already exists and is not empty" % self.project_dir) > -1)
def test_valid_project_dir(self):
prj_dir = 'example_prj'
existing_path = os.path.join(self.project_dir, '.hidden_file')
with open(existing_path, 'w') as f:
f.write('')
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'-p'+self.project_dir,
prj_dir])
self.assertFalse(self.stderr.getvalue().find("Path '%s' already exists and is not empty" % self.project_dir) > -1)
def test_latest_version(self):
self.assertEqual(less_than_version('2.4'), '2.5')
self.assertEqual(less_than_version('3'), '3.1')
self.assertEqual(less_than_version('3.0.1'), '3.1.1')
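    # Note (added for illustration): less_than_version builds pip version
    # pins -- e.g. a requested CMS "2.4" becomes the "django-cms<2.5"
    # specifier that test_requirements checks for below.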
def test_supported_versions(self):
self.assertEqual(supported_versions('stable', 'stable'), (1.7, 3.1))
self.assertEqual(supported_versions('stable', '3.0'), (1.7, 3.0))
self.assertEqual(supported_versions('stable', '3.0.10'), (1.7, None))
self.assertEqual(supported_versions('stable', 'rc'), (1.7, 3.2))
self.assertEqual(supported_versions('stable', 'beta'), (1.7, 3.2))
self.assertEqual(supported_versions('stable', 'develop'), (1.7, 3.2))
        with self.assertRaises(RuntimeError):
            supported_versions('stable', '2.4')
        with self.assertRaises(RuntimeError):
            supported_versions('1.5', 'stable')
self.assertEqual(supported_versions('1.5', '2.4'), (1.5, 2.4))
self.assertEqual(supported_versions('1.6', 'stable'), (1.6, 3.1))
self.assertEqual(supported_versions('1.6.9', 'stable'), (None, 3.1))
self.assertEqual(supported_versions('1.7', 'stable'), (1.7, 3.1))
self.assertEqual(supported_versions('beta', 'stable'), (1.8, 3.1))
self.assertEqual(supported_versions('develop', 'stable'), (1.8, 3.1))
def test_requirements(self):
"""
Test for different configuration and package versions
"""
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--django-version=1.6',
'--i18n=no',
'-f',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find('django-cms<3.2') > -1)
self.assertTrue(conf_data.requirements.find('Django<1.7') > -1)
self.assertTrue(conf_data.requirements.find('django-filer') > -1)
self.assertTrue(conf_data.requirements.find('cmsplugin-filer') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8,<1.8.6') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor') > -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=2.4',
'--django-version=1.5',
'-f',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find('six') > -1)
self.assertTrue(conf_data.requirements.find('django-cms<2.5') > -1)
self.assertTrue(conf_data.requirements.find('Django<1.6') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-admin-style') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-column') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-file') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-flash') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-googlemap') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-inherit') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-link') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-picture') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-style') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-teaser') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-video') == -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.7') > -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=stable',
'--django-version=stable',
'--reversion=yes',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find('django-cms<3.2') > -1)
self.assertTrue(conf_data.requirements.find('Django<1.8') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-admin-style') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-column') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-file') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-flash') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-googlemap') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-inherit') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-link') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-picture') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-style') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-teaser') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-video') > -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=develop',
'--django-version=stable',
'-f',
'--reversion=yes',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find(config.data.DJANGOCMS_DEVELOP) > -1)
self.assertTrue(conf_data.requirements.find('Django<1.8') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-admin-style') > -1)
self.assertTrue(conf_data.requirements.find('django-filer') > -1)
self.assertTrue(conf_data.requirements.find('cmsplugin-filer') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-column') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-file') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-flash') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-googlemap') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-inherit') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-link') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-picture') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-style') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-teaser') == -1)
self.assertTrue(conf_data.requirements.find('djangocms-video') == -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=develop',
'--django-version=1.6',
'-f',
'--reversion=yes',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find(config.data.DJANGOCMS_DEVELOP) > -1)
self.assertTrue(conf_data.requirements.find('Django<1.7') > -1)
self.assertTrue(conf_data.requirements.find('django-mptt') == -1)
self.assertTrue(conf_data.requirements.find('django-treebeard') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8,<1.8.6') > -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=develop',
'--django-version=stable',
'-f',
'--reversion=yes',
'-z=yes',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find(config.data.DJANGOCMS_DEVELOP) > -1)
self.assertTrue(conf_data.requirements.find('Django<1.8') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-admin-style') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8') > -1)
self.assertTrue(conf_data.requirements.find('pytz') > -1)
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--i18n=no',
'--cms-version=develop',
'--django-version=1.7',
'--reversion=yes',
'-z=yes',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.requirements.find(config.data.DJANGOCMS_DEVELOP) > -1)
self.assertTrue(conf_data.requirements.find('Django<1.8') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-text-ckeditor/archive/master.zip') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-admin-style/archive/master.zip') > -1)
self.assertTrue(conf_data.requirements.find('djangocms-teaser/archive/master.zip') > -1)
self.assertTrue(conf_data.requirements.find('django-reversion>=1.8.2') > -1)
self.assertTrue(conf_data.requirements.find('south') == -1)
def disabled_test_aldryn_compatibility(self):
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
with self.assertRaises(SystemExit) as error:
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--cms-version=2.4',
'--django-version=stable',
#'-a',
'-p'+self.project_dir,
'example_prj'])
try:
self.assertEqual(error.exception.code, 5)
except AttributeError:
self.assertEqual(error.exception, 5)
def test_boostrap(self):
"""
Verify handling of bootstrap parameter
"""
conf_data = config.parse([
'-q',
'-p'+self.project_dir,
'example_prj'])
self.assertFalse(conf_data.bootstrap)
conf_data = config.parse([
'--bootstrap=yes', '-q',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.bootstrap)
def test_starting_page(self):
"""
Verify handling of starting-page parameter
"""
conf_data = config.parse([
'-q',
'-p'+self.project_dir,
'example_prj'])
self.assertFalse(conf_data.starting_page)
conf_data = config.parse([
'--starting-page=yes', '-q',
'-p'+self.project_dir,
'example_prj'])
self.assertTrue(conf_data.starting_page)
def test_utc(self):
"""
Verify handling UTC default
"""
default_tz = get_localzone()
conf_data = config.parse([
'-q',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(text_type(conf_data.timezone), default_tz.zone)
conf_data = config.parse([
'-q', '--utc',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.timezone, 'UTC')
def test_templates(self):
"""
Verify handling of valid (existing) and invalid (non-existing) templates directory parameter
"""
conf_data = config.parse([
'--templates=/foo/bar', '-q',
'-p'+self.project_dir,
'example_prj'])
self.assertFalse(conf_data.templates)
tpl_path = os.path.join(os.path.dirname(__file__), 'test_templates')
conf_data = config.parse([
'--templates=%s' % tpl_path, '-q',
'-p'+self.project_dir,
'example_prj'])
self.assertEqual(conf_data.templates, tpl_path)
def suspend_test_check_install(self):
import pip
# discard the argparser errors
with patch('sys.stdout', self.stdout):
with patch('sys.stderr', self.stderr):
# clean the virtualenv
try:
pip.main(['uninstall', '-y', 'psycopg2'])
except pip.exceptions.UninstallationError:
## package not installed, all is fine
pass
try:
pip.main(['uninstall', '-y', 'pillow'])
except pip.exceptions.UninstallationError:
## package not installed, all is fine
pass
try:
pip.main(['uninstall', '-y', 'mysql-python'])
except pip.exceptions.UninstallationError:
## package not installed, all is fine
pass
# Check postgres / pillow
conf_data = config.parse([
'-q',
'--db=postgres://user:pwd@host/dbname',
'--django-version=1.4',
'--i18n=no',
'-f',
'-p'+self.project_dir,
'example_prj'])
with self.assertRaises(EnvironmentError) as context_error:
check_install(conf_data)
self.assertTrue(str(context_error.exception).find('Pillow is not installed') > -1)
self.assertTrue(str(context_error.exception).find('PostgreSQL driver is not installed') > -1)
# Check mysql
conf_data = config.parse([
'-q',
'--db=mysql://user:pwd@host/dbname',
'--django-version=1.4',
'--i18n=no',
'-f',
'-p'+self.project_dir,
'example_prj'])
with self.assertRaises(EnvironmentError) as context_error:
check_install(conf_data)
self.assertTrue(str(context_error.exception).find('MySQL driver is not installed') > -1)
def test_show_plugins(self):
sys.stdout = StringIO()
try:
config.show_plugins()
finally:
sys.stdout = sys.__stdout__
def test_show_requirements(self):
sys.stdout = StringIO()
try:
conf_data = config.parse([
'-q',
'--db=mysql://user:pwd@host/dbname',
'--django-version=1.7',
'--i18n=no',
'-f',
'-p'+self.project_dir,
'example_prj'])
config.show_requirements(conf_data)
finally:
sys.stdout = sys.__stdout__
| bsd-3-clause | 5,874,671,321,833,623,000 | 42.231164 | 122 | 0.542322 | false |
drpngx/tensorflow | tensorflow/tools/docker/simple_console.py | 605 | 1028 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Start a simple interactive console with TensorFlow available."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import code
import sys
def main(_):
"""Run an interactive console."""
code.interact()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 | -203,848,976,866,271,100 | 30.151515 | 80 | 0.678988 | false |
yw374cornell/e-mission-server | emission/tests/storageTests/analysis_ts_common.py | 1 | 3325 | # Standard imports
import unittest
import datetime as pydt
import logging
import uuid
import json
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.timequery as estt
import emission.core.get_database as edb
def createNewTripLike(utest, key, wrapper):
new_trip = wrapper()
new_trip.start_ts = 5
new_trip.end_ts = 6
new_trip_id = esta.TimeSeries.get_time_series(utest.testUserId).insert_data(
utest.testUserId, key, new_trip)
new_trip_entry = esta.TimeSeries.get_time_series(utest.testUserId).get_entry_from_id(
key, new_trip_id)
utest.assertIsNotNone(new_trip_entry.get_id())
utest.assertEqual(new_trip_entry.user_id, utest.testUserId)
return new_trip_entry
def createNewPlaceLike(utest, key, wrapper):
new_place = wrapper()
new_place.enter_ts = 5
new_place.exit_ts = 6
    new_place_id = esta.TimeSeries.get_time_series(utest.testUserId).insert_data(
        utest.testUserId, key, new_place)
    new_place_entry = esta.TimeSeries.get_time_series(utest.testUserId).get_entry_from_id(
        key, new_place_id)
utest.assertIsNotNone(new_place_entry.get_id())
utest.assertEqual(new_place_entry.user_id, utest.testUserId)
return new_place_entry
def saveTripLike(utest, key, wrapper):
new_trip = createNewTripLike(utest, key, wrapper)
utest.assertEqual(edb.get_analysis_timeseries_db().find(
{"metadata.key": key, "data.end_ts": 6}).count(), 1)
utest.assertEqual(edb.get_analysis_timeseries_db().find_one(
{"metadata.key": key, "data.end_ts": 6})["_id"], new_trip.get_id())
utest.assertEqual(edb.get_analysis_timeseries_db().find_one(
{"metadata.key": key, "data.end_ts": 6})["user_id"], utest.testUserId)
return new_trip
def savePlaceLike(utest, key, wrapper):
new_place = createNewPlaceLike(utest, key, wrapper)
utest.assertEqual(edb.get_analysis_timeseries_db().find(
{"metadata.key": key, "data.exit_ts": 6}).count(), 1)
utest.assertEqual(edb.get_analysis_timeseries_db().find_one(
{"metadata.key": key, "data.exit_ts": 6})["_id"], new_place.get_id())
utest.assertEqual(edb.get_analysis_timeseries_db().find_one(
{"metadata.key": key, "data.exit_ts": 6})["user_id"], utest.testUserId)
return new_place
def queryTripLike(utest, key, wrapper):
new_trip = createNewTripLike(utest, key, wrapper)
ret_arr_time = esda.get_objects(key, utest.testUserId,
estt.TimeQuery("data.start_ts", 4, 6))
utest.assertEqual(ret_arr_time, [new_trip.data])
def queryPlaceLike(utest, key, wrapper):
    new_place = createNewPlaceLike(utest, key, wrapper)
    ret_arr_time = esda.get_objects(key, utest.testUserId,
                                    estt.TimeQuery("data.enter_ts", 4, 6))
    utest.assertEqual(ret_arr_time, [new_place.data])
def getObject(utest, key, wrapper):
if key == esda.RAW_TRIP_KEY or key == esda.RAW_SECTION_KEY:
new_obj = createNewTripLike(utest, key, wrapper)
else:
new_obj = createNewPlaceLike(utest, key, wrapper)
ret_obj = esda.get_object(key, new_obj.get_id())
    utest.assertEqual(ret_obj, new_obj.data)
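# Example wiring (added for illustration): these helpers expect a
# unittest.TestCase with a testUserId attribute; the wrapper argument is the
# entry wrapper class for the given key (RawTripWrapper below is a
# hypothetical name):
#
#   class TestTripQueries(unittest.TestCase):
#       def setUp(self):
#           self.testUserId = uuid.uuid4()
#       def testQueryTrip(self):
#           queryTripLike(self, esda.RAW_TRIP_KEY, RawTripWrapper)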
| bsd-3-clause | -6,959,645,504,513,503,000 | 40.5625 | 90 | 0.682105 | false |
cfriedt/gnuradio | gr-uhd/examples/python/fm_tx_2_daughterboards.py | 58 | 7706 | #!/usr/bin/env python
#
# Copyright 2005-2007,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Transmit 2 signals, one out each daughterboard.
Outputs SSB (USB) signals on side A and side B at frequencies
specified on command line.
Side A is 600 Hz tone.
Side B is 350 + 440 Hz tones.
"""
from gnuradio import gr, uhd
from gnuradio import filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio.eng_notation import num_to_str, str_to_num
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import math
import sys
class example_signal_0(gr.hier_block2):
"""
Sinusoid at 600 Hz.
"""
def __init__(self, sample_rate):
gr.hier_block2.__init__(self, "example_signal_0",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
src = analog.sig_source_c(sample_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
600, # frequency
1.0, # amplitude
0) # DC Offset
self.connect(src, self)
class example_signal_1(gr.hier_block2):
"""
North American dial tone (350 + 440 Hz).
"""
def __init__(self, sample_rate):
gr.hier_block2.__init__(self, "example_signal_1",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
src0 = analog.sig_source_c(sample_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
350, # frequency
1.0, # amplitude
0) # DC Offset
src1 = analog.sig_source_c(sample_rate, # sample rate
analog.GR_SIN_WAVE, # waveform type
440, # frequency
1.0, # amplitude
0) # DC Offset
sum = blocks.add_cc()
self.connect(src0, (sum, 0))
self.connect(src1, (sum, 1))
self.connect(sum, self)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
usage = "%prog: [options] tx-freq0 tx-freq1"
parser = OptionParser (option_class=eng_option, usage=usage)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-s", "--samp-rate", type="eng_float", default=320e3,
help="set sample rate [default=%default]")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
(options, args) = parser.parse_args ()
if len(args) != 2:
parser.print_help()
raise SystemExit
else:
freq0 = str_to_num(args[0])
freq1 = str_to_num(args[1])
# ----------------------------------------------------------------
# Set up USRP to transmit on both daughterboards
d = uhd.find_devices(uhd.device_addr(options.args))
uhd_type = d[0].get('type')
stream_args = uhd.stream_args('fc32', channels=range(2))
self.u = uhd.usrp_sink(device_addr=options.args, stream_args=stream_args)
# Set up USRP system based on type
if(uhd_type == "usrp"):
self.u.set_subdev_spec("A:0 B:0")
tr0 = uhd.tune_request(freq0)
tr1 = uhd.tune_request(freq1)
else:
if abs(freq0 - freq1) > 5.5e6:
                sys.stderr.write("\nError: When not using two separate d'boards, frequencies must be within 5.5MHz of each other.\n")
raise SystemExit
self.u.set_subdev_spec("A:0 A:0")
mid_freq = (freq0 + freq1)/2.0
tr0 = uhd.tune_request(freq0, rf_freq=mid_freq,
rf_freq_policy=uhd.tune_request.POLICY_MANUAL)
tr1 = uhd.tune_request(freq1, rf_freq=mid_freq,
rf_freq_policy=uhd.tune_request.POLICY_MANUAL)
# Use the tune requests to tune each channel
self.set_freq(tr0, 0)
self.set_freq(tr1, 1)
self.usrp_rate = options.samp_rate
self.u.set_samp_rate(self.usrp_rate)
dev_rate = self.u.get_samp_rate()
# ----------------------------------------------------------------
# build two signal sources, interleave them, amplify and
# connect them to usrp
sig0 = example_signal_0(self.usrp_rate)
sig1 = example_signal_1(self.usrp_rate)
intl = blocks.interleave(gr.sizeof_gr_complex)
self.connect(sig0, (intl, 0))
self.connect(sig1, (intl, 1))
# Correct for any difference in requested and actual rates
rrate = self.usrp_rate / dev_rate
resamp = filter.pfb.arb_resampler_ccf(rrate)
# and wire them up
self.connect(intl, resamp, self.u)
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
options.gain = float(g.start()+g.stop())/2.0
self.set_gain(options.gain, 0)
self.set_gain(options.gain, 1)
# Set the subdevice spec
if(options.spec):
self.u.set_subdev_spec(options.spec, 0)
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
self.u.set_antenna(options.antenna, 1)
def set_freq(self, target_freq, chan):
"""
Set the center frequency we're interested in.
        Args:
            target_freq: frequency in Hz
            chan: channel index (0 = side A, 1 = side B)
        Returns:
            bool: True if tuning succeeded, False otherwise
"""
print "Tuning channel %s to %sHz" % \
(chan, num_to_str(target_freq))
r = self.u.set_center_freq(target_freq, chan)
if r:
return True
else:
print " Set Frequency Failed!"
return False
def set_gain(self, gain, chan):
self.u.set_gain(gain, chan)
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
| gpl-3.0 | -7,199,769,946,225,564,000 | 34.675926 | 132 | 0.53348 | false |
analyseuc3m/ANALYSE-v1 | openedx/core/djangoapps/ccxcon/tests/test_api.py | 24 | 8241 | """
Unit tests for the API module
"""
import datetime
import mock
import pytz
import urlparse
from nose.plugins.attrib import attr
from opaque_keys.edx.keys import CourseKey
from student.tests.factories import AdminFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
SharedModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import (
CourseFactory,
ItemFactory,
)
from openedx.core.djangoapps.ccxcon import api as ccxconapi
from .factories import CcxConFactory
def flatten(seq):
"""
For [[1, 2], [3, 4]] returns [1, 2, 3, 4]. Does not recurse.
"""
return [x for sub in seq for x in sub]
def fetch_token_mock(*args, **kwargs): # pylint: disable=unused-argument
"""
Mock function used to bypass the oauth fetch token
"""
return
@attr('shard_1')
class APIsTestCase(SharedModuleStoreTestCase):
"""
Unit tests for the API module functions
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(APIsTestCase, cls).setUpClass()
cls.course = course = CourseFactory.create()
cls.course_key = cls.course.location.course_key
# Create a course outline
start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC
)
due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC
)
cls.chapters = [
ItemFactory.create(start=start, parent=course) for _ in xrange(2)
]
cls.sequentials = flatten([
[
ItemFactory.create(parent=chapter) for _ in xrange(2)
] for chapter in cls.chapters
])
cls.verticals = flatten([
[
ItemFactory.create(
start=start, due=due, parent=sequential, graded=True, format='Homework', category=u'vertical'
) for _ in xrange(2)
] for sequential in cls.sequentials
])
# Trying to wrap the whole thing in a bulk operation fails because it
# doesn't find the parents. But we can at least wrap this part...
with cls.store.bulk_operations(course.id, emit_signals=False):
blocks = flatten([ # pylint: disable=unused-variable
[
ItemFactory.create(parent=vertical) for _ in xrange(2)
] for vertical in cls.verticals
])
def setUp(self):
"""
Set up tests
"""
super(APIsTestCase, self).setUp()
# Create instructor account
self.instructor = AdminFactory.create()
# create an instance of modulestore
self.mstore = modulestore()
# enable ccx
self.course.enable_ccx = True
# setup CCX connector
self.course.ccx_connector = 'https://url.to.cxx.connector.mit.edu'
# save the changes
self.mstore.update_item(self.course, self.instructor.id)
# create a configuration for the ccx connector: this must match the one in the course
self.ccxcon_conf = CcxConFactory(url=self.course.ccx_connector)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_no_valid_course_key(self, mock_post):
"""
Test for an invalid course key
"""
missing_course_key = CourseKey.from_string('course-v1:FakeOrganization+CN999+CR-FALL99')
self.assertIsNone(ccxconapi.course_info_to_ccxcon(missing_course_key))
self.assertEqual(mock_post.call_count, 0)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_no_ccx_enabled(self, mock_post):
"""
Test for a course without CCX enabled
"""
self.course.enable_ccx = False
self.mstore.update_item(self.course, self.instructor.id)
self.assertIsNone(ccxconapi.course_info_to_ccxcon(self.course_key))
self.assertEqual(mock_post.call_count, 0)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_invalid_ccx_connector(self, mock_post):
"""
Test for a course with invalid CCX connector URL
"""
# no connector at all
self.course.ccx_connector = ""
self.mstore.update_item(self.course, self.instructor.id)
self.assertIsNone(ccxconapi.course_info_to_ccxcon(self.course_key))
self.assertEqual(mock_post.call_count, 0)
# invalid url
self.course.ccx_connector = "www.foo"
self.mstore.update_item(self.course, self.instructor.id)
self.assertIsNone(ccxconapi.course_info_to_ccxcon(self.course_key))
self.assertEqual(mock_post.call_count, 0)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_no_config(self, mock_post):
"""
Test for course with ccx connector credentials not configured
"""
self.course.ccx_connector = "https://www.foo.com"
self.mstore.update_item(self.course, self.instructor.id)
self.assertIsNone(ccxconapi.course_info_to_ccxcon(self.course_key))
self.assertEqual(mock_post.call_count, 0)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_ok(self, mock_post):
"""
Test for happy path
"""
mock_response = mock.Mock()
mock_response.status_code = 201
mock_post.return_value = mock_response
ccxconapi.course_info_to_ccxcon(self.course_key)
self.assertEqual(mock_post.call_count, 1)
k_args, k_kwargs = mock_post.call_args
# no args used for the call
self.assertEqual(k_args, tuple())
self.assertEqual(
k_kwargs.get('url'),
urlparse.urljoin(self.course.ccx_connector, ccxconapi.CCXCON_COURSEXS_URL)
)
# second call with different status code
mock_response.status_code = 200
mock_post.return_value = mock_response
ccxconapi.course_info_to_ccxcon(self.course_key)
self.assertEqual(mock_post.call_count, 2)
k_args, k_kwargs = mock_post.call_args
# no args used for the call
self.assertEqual(k_args, tuple())
self.assertEqual(
k_kwargs.get('url'),
urlparse.urljoin(self.course.ccx_connector, ccxconapi.CCXCON_COURSEXS_URL)
)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_500_error(self, mock_post):
"""
Test for 500 error: a CCXConnServerError exception is raised
"""
mock_response = mock.Mock()
mock_response.status_code = 500
mock_post.return_value = mock_response
with self.assertRaises(ccxconapi.CCXConnServerError):
ccxconapi.course_info_to_ccxcon(self.course_key)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.fetch_token', fetch_token_mock)
@mock.patch('requests_oauthlib.oauth2_session.OAuth2Session.post')
def test_course_info_to_ccxcon_other_status_codes(self, mock_post):
"""
Test for status codes different from >= 500 and 201:
The called function doesn't raise any exception and simply returns None.
"""
mock_response = mock.Mock()
for status_code in (204, 300, 304, 400, 404):
mock_response.status_code = status_code
mock_post.return_value = mock_response
self.assertIsNone(ccxconapi.course_info_to_ccxcon(self.course_key))
| agpl-3.0 | 778,108,877,563,444,100 | 37.509346 | 113 | 0.650771 | false |
snyderr/robotframework | src/robot/model/message.py | 2 | 2643 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import html_escape, py2to3, setter
from .itemlist import ItemList
from .modelobject import ModelObject
@py2to3
class Message(ModelObject):
"""A message created during the test execution.
Can be a log message triggered by a keyword, or a warning or an error
that occurred during parsing or test execution.
"""
__slots__ = ['message', 'level', 'html', 'timestamp', '_sort_key']
def __init__(self, message='', level='INFO', html=False, timestamp=None,
parent=None):
#: The message content as a string.
self.message = message
#: Severity of the message. Either ``TRACE``, ``DEBUG``, ``INFO``,
#: ``WARN``, ``ERROR``, or ``FAIL``. The latest one is only used with
#: keyword failure messages.
self.level = level
#: ``True`` if the content is in HTML, ``False`` otherwise.
self.html = html
#: Timestamp in format ``%Y%m%d %H:%M:%S.%f``.
self.timestamp = timestamp
self._sort_key = -1
#: The object this message was triggered by.
self.parent = parent
@setter
def parent(self, parent):
if parent and parent is not getattr(self, 'parent', None):
self._sort_key = getattr(parent, '_child_sort_key', -1)
return parent
@property
def html_message(self):
"""Returns the message content as HTML."""
return self.message if self.html else html_escape(self.message)
def visit(self, visitor):
""":mod:`Visitor interface <robot.model.visitor>` entry-point."""
visitor.visit_message(self)
def __unicode__(self):
return self.message
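# Minimal usage sketch (illustrative values, not part of the module):
#
#     msg = Message('<b>done</b>', level='INFO', html=True)
#     msg.html_message                # -> '<b>done</b>' (kept verbatim, already HTML)
#     Message('1 < 2').html_message   # -> '1 &lt; 2' (escaped on demand)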
class Messages(ItemList):
__slots__ = []
def __init__(self, message_class=Message, parent=None, messages=None):
ItemList.__init__(self, message_class, {'parent': parent}, messages)
def __setitem__(self, index, item):
old = self[index]
ItemList.__setitem__(self, index, item)
self[index]._sort_key = old._sort_key
| apache-2.0 | -9,209,179,479,988,908,000 | 34.716216 | 77 | 0.640182 | false |
benlangmuir/swift | utils/build_swift/tests/utils.py | 21 | 2090 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import os
import sys
import unittest
from contextlib import contextmanager
__all__ = [
'add_metaclass',
'redirect_stderr',
'redirect_stdout',
'TestCase',
'UTILS_PATH',
]
UTILS_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
))
# -----------------------------------------------------------------------------
def add_metaclass(metacls):
def wrapper(cls):
body = vars(cls).copy()
body.pop('__dict__', None)
body.pop('__weakref__', None)
return metacls(cls.__name__, cls.__bases__, body)
return wrapper
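# Illustrative use (hypothetical Meta): attach a metaclass in a way that
# works on both Python 2 and Python 3.
#
#     class Meta(type):
#         pass
#
#     @add_metaclass(Meta)
#     class Widget(object):
#         pass
#
#     type(Widget) is Meta    # -> True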
@contextmanager
def redirect_stderr(stream=None):
stream = stream or StringIO()
old_stderr, sys.stderr = sys.stderr, stream
try:
yield stream
finally:
sys.stderr = old_stderr
@contextmanager
def redirect_stdout(stream=None):
stream = stream or StringIO()
old_stdout, sys.stdout = sys.stdout, stream
try:
yield stream
finally:
sys.stdout = old_stdout
# -----------------------------------------------------------------------------
class TestCase(unittest.TestCase):
@contextmanager
def quietOutput(self):
with open(os.devnull, 'w') as devnull:
with redirect_stderr(devnull), redirect_stdout(devnull):
yield
@contextmanager
def assertNotRaises(self, exception=BaseException):
assert issubclass(exception, BaseException)
try:
yield
except exception as e:
message = '{} raised: {}'.format(exception.__name__, str(e))
raise self.failureException(message)
| apache-2.0 | -5,199,293,003,264,466,000 | 22.222222 | 79 | 0.597608 | false |
klahnakoski/esShardBalancer | pyLibrary/env/big_data.py | 2 | 12353 | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import gzip
from io import BytesIO
from tempfile import TemporaryFile
import zipfile
import zlib
from mo_logs.exceptions import suppress_exception
from mo_logs import Log
from mo_math import Math
# LIBRARY TO DEAL WITH BIG DATA ARRAYS AS ITERATORS OVER (IR)REGULAR SIZED
# BLOCKS, OR AS ITERATORS OVER LINES
DEBUG = False
MIN_READ_SIZE = 8 * 1024
MAX_STRING_SIZE = 1 * 1024 * 1024
class FileString(object):
"""
ACTS LIKE A STRING, BUT IS A FILE
"""
def __init__(self, file):
self.file = file
def decode(self, encoding):
if encoding != "utf8":
Log.error("can not handle {{encoding}}", encoding= encoding)
self.encoding = encoding
return self
def split(self, sep):
if sep != "\n":
Log.error("Can only split by lines")
self.file.seek(0)
return LazyLines(self.file)
def __len__(self):
temp = self.file.tell()
self.file.seek(0, 2)
file_length = self.file.tell()
self.file.seek(temp)
return file_length
def __getslice__(self, i, j):
j = Math.min(j, len(self))
        if j - i > 2 ** 28:
Log.error("Slice of {{num}} bytes is too big", num=j - i)
try:
self.file.seek(i)
output = self.file.read(j - i).decode(self.encoding)
return output
except Exception as e:
Log.error(
"Can not read file slice at {{index}}, with encoding {{encoding}}",
index=i,
encoding=self.encoding,
cause=e
)
    def __add__(self, other):
        self.file.seek(0, 2)
        self.file.write(other)
        return self
def __radd__(self, other):
new_file = TemporaryFile()
new_file.write(other)
self.file.seek(0)
for l in self.file:
new_file.write(l)
new_file.seek(0)
return FileString(new_file)
def __getattr__(self, attr):
return getattr(self.file, attr)
def __del__(self):
self.file, temp = None, self.file
if temp:
temp.close()
def __iter__(self):
self.file.seek(0)
return self.file
def __unicode__(self):
if self.encoding == "utf8":
temp = self.file.tell()
self.file.seek(0, 2)
file_length = self.file.tell()
self.file.seek(0)
output = self.file.read(file_length).decode(self.encoding)
self.file.seek(temp)
return output
def safe_size(source):
"""
READ THE source UP TO SOME LIMIT, THEN COPY TO A FILE IF TOO BIG
RETURN A str() OR A FileString()
"""
if source is None:
return None
total_bytes = 0
bytes = []
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
bytes.append(b)
if total_bytes > MAX_STRING_SIZE:
try:
data = FileString(TemporaryFile())
for bb in bytes:
data.write(bb)
del bytes
del bb
b = source.read(MIN_READ_SIZE)
while b:
total_bytes += len(b)
data.write(b)
b = source.read(MIN_READ_SIZE)
data.seek(0)
Log.note("Using file of size {{length}} instead of str()", length= total_bytes)
return data
except Exception as e:
Log.error("Could not write file > {{num}} bytes", num= total_bytes, cause=e)
b = source.read(MIN_READ_SIZE)
data = b"".join(bytes)
del bytes
return data
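# Illustrative use (hypothetical payload): small sources come back as a plain
# byte string; anything over MAX_STRING_SIZE is spooled to a temporary file
# and returned as a FileString instead.
#
#     from io import BytesIO
#     safe_size(BytesIO(b"tiny payload"))   # -> 'tiny payload'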
class LazyLines(object):
"""
SIMPLE LINE ITERATOR, BUT WITH A BIT OF CACHING TO LOOK LIKE AN ARRAY
"""
def __init__(self, source, encoding="utf8"):
"""
ASSUME source IS A LINE ITERATOR OVER utf8 ENCODED BYTE STREAM
"""
self.source = source
self.encoding = encoding
self._iter = self.__iter__()
self._last = None
self._next = 0
def __getslice__(self, i, j):
if i == self._next - 1:
def output():
yield self._last
for v in self._iter:
self._next += 1
yield v
return output()
if i == self._next:
return self._iter
Log.error("Do not know how to slice this generator")
def __iter__(self):
def output():
for v in self.source:
self._last = v
yield self._last
return output()
def __getitem__(self, item):
try:
if item == self._next:
self._next += 1
return self._iter.next()
elif item == self._next - 1:
return self._last
else:
Log.error("can not index out-of-order too much")
except Exception as e:
Log.error("Problem indexing", e)
class CompressedLines(LazyLines):
"""
KEEP COMPRESSED HTTP (content-type: gzip) IN BYTES ARRAY
WHILE PULLING OUT ONE LINE AT A TIME FOR PROCESSING
"""
def __init__(self, compressed, encoding="utf8"):
"""
USED compressed BYTES TO DELIVER LINES OF TEXT
LIKE LazyLines, BUT HAS POTENTIAL TO seek()
"""
self.compressed = compressed
LazyLines.__init__(self, None, encoding=encoding)
self._iter = self.__iter__()
def __iter__(self):
return LazyLines(ibytes2ilines(compressed_bytes2ibytes(self.compressed, MIN_READ_SIZE), encoding=self.encoding)).__iter__()
def __getslice__(self, i, j):
if i == self._next:
return self._iter
if i == 0:
return self.__iter__()
if i == self._next - 1:
def output():
yield self._last
for v in self._iter:
yield v
return output()
Log.error("Do not know how to slice this generator")
def __getitem__(self, item):
try:
if item == self._next:
self._last = self._iter.next()
self._next += 1
return self._last
elif item == self._next - 1:
return self._last
else:
Log.error("can not index out-of-order too much")
except Exception as e:
Log.error("Problem indexing", e)
def __radd__(self, other):
new_file = TemporaryFile()
new_file.write(other)
self.file.seek(0)
for l in self.file:
new_file.write(l)
new_file.seek(0)
return FileString(new_file)
def compressed_bytes2ibytes(compressed, size):
"""
CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR
USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER
GENERATOR (LIKE A DECOMPRESSOR)
"""
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
for i in range(0, Math.ceiling(len(compressed), size), size):
try:
block = compressed[i: i + size]
yield decompressor.decompress(block)
except Exception as e:
Log.error("Not expected", e)
def ibytes2ilines(generator, encoding="utf8", flexible=False, closer=None):
"""
CONVERT A GENERATOR OF (ARBITRARY-SIZED) byte BLOCKS
TO A LINE (CR-DELIMITED) GENERATOR
:param generator:
:param encoding: None TO DO NO DECODING
:param closer: OPTIONAL FUNCTION TO RUN WHEN DONE ITERATING
:return:
"""
decode = get_decoder(encoding=encoding, flexible=flexible)
_buffer = generator.next()
s = 0
e = _buffer.find(b"\n")
while True:
while e == -1:
try:
next_block = generator.next()
_buffer = _buffer[s:] + next_block
s = 0
e = _buffer.find(b"\n")
except StopIteration:
_buffer = _buffer[s:]
del generator
if closer:
closer()
if _buffer:
yield decode(_buffer)
return
yield decode(_buffer[s:e])
s = e + 1
e = _buffer.find(b"\n", s)
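# Illustrative use: arbitrary block boundaries are re-cut on newlines.
#
#     blocks = iter([b"alpha\nbe", b"ta\ngamma"])
#     list(ibytes2ilines(blocks))   # -> [u'alpha', u'beta', u'gamma']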
class GzipLines(CompressedLines):
"""
SAME AS CompressedLines, BUT USING THE GzipFile FORMAT FOR COMPRESSED BYTES
"""
def __init__(self, compressed, encoding="utf8"):
CompressedLines.__init__(self, compressed, encoding=encoding)
def __iter__(self):
buff = BytesIO(self.compressed)
return LazyLines(gzip.GzipFile(fileobj=buff, mode='r'), encoding=self.encoding).__iter__()
class ZipfileLines(CompressedLines):
"""
SAME AS CompressedLines, BUT USING THE ZipFile FORMAT FOR COMPRESSED BYTES
"""
def __init__(self, compressed, encoding="utf8"):
CompressedLines.__init__(self, compressed, encoding=encoding)
def __iter__(self):
buff = BytesIO(self.compressed)
archive = zipfile.ZipFile(buff, mode='r')
names = archive.namelist()
if len(names) != 1:
Log.error("*.zip file has {{num}} files, expecting only one.", num= len(names))
stream = archive.open(names[0], "r")
return LazyLines(sbytes2ilines(stream), encoding=self.encoding).__iter__()
def icompressed2ibytes(source):
"""
:param source: GENERATOR OF COMPRESSED BYTES
:return: GENERATOR OF BYTES
"""
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
last_bytes_count = 0 # Track the last byte count, so we do not show too many debug lines
bytes_count = 0
for bytes_ in source:
try:
data = decompressor.decompress(bytes_)
except Exception as e:
Log.error("problem", cause=e)
bytes_count += len(data)
if Math.floor(last_bytes_count, 1000000) != Math.floor(bytes_count, 1000000):
last_bytes_count = bytes_count
if DEBUG:
Log.note("bytes={{bytes}}", bytes=bytes_count)
yield data
def scompressed2ibytes(stream):
"""
:param stream: SOMETHING WITH read() METHOD TO GET MORE BYTES
:return: GENERATOR OF UNCOMPRESSED BYTES
"""
def more():
try:
while True:
bytes_ = stream.read(4096)
if not bytes_:
return
yield bytes_
except Exception as e:
Log.error("Problem iterating through stream", cause=e)
finally:
with suppress_exception:
stream.close()
return icompressed2ibytes(more())
def sbytes2ilines(stream, encoding="utf8", closer=None):
"""
CONVERT A STREAM (with read() method) OF (ARBITRARY-SIZED) byte BLOCKS
TO A LINE (CR-DELIMITED) GENERATOR
"""
def read():
try:
while True:
bytes_ = stream.read(4096)
if not bytes_:
return
yield bytes_
except Exception as e:
Log.error("Problem iterating through stream", cause=e)
finally:
try:
stream.close()
except Exception:
pass
if closer:
try:
closer()
except Exception:
pass
return ibytes2ilines(read(), encoding=encoding)
def get_decoder(encoding, flexible=False):
"""
RETURN FUNCTION TO PERFORM DECODE
:param encoding: STRING OF THE ENCODING
:param flexible: True IF YOU WISH TO TRY OUR BEST, AND KEEP GOING
:return: FUNCTION
"""
if encoding == None:
def no_decode(v):
return v
return no_decode
elif flexible:
def do_decode1(v):
return v.decode(encoding, 'ignore')
return do_decode1
else:
def do_decode2(v):
return v.decode(encoding)
return do_decode2
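# Illustrative use:
#
#     decode = get_decoder("utf8", flexible=True)
#     decode(b"caf\xc3\xa9")      # -> u'caf\xe9' (bad bytes would be dropped)
#     get_decoder(None)(b"raw")   # -> 'raw' (no decoding at all)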
| mpl-2.0 | 5,974,824,802,151,708,000 | 27.794872 | 131 | 0.543107 | false |
MTASZTAKI/ApertusVR | plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/examples/getClosestPoints.py | 2 | 2665 | import pybullet as p
import time
p.connect(p.GUI)
useCollisionShapeQuery = True
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
geom = p.createCollisionShape(p.GEOM_SPHERE, radius=0.1)
geomBox = p.createCollisionShape(p.GEOM_BOX, halfExtents=[0.2, 0.2, 0.2])
baseOrientationB = p.getQuaternionFromEuler([0, 0.3, 0]) #[0,0.5,0.5,0]
basePositionB = [1.5, 0, 1]
obA = -1
obB = -1
obA = p.createMultiBody(baseMass=0, baseCollisionShapeIndex=geom, basePosition=[0.5, 0, 1])
obB = p.createMultiBody(baseMass=0,
baseCollisionShapeIndex=geomBox,
basePosition=basePositionB,
baseOrientation=baseOrientationB)
lineWidth = 3
colorRGB = [1, 0, 0]
lineId = p.addUserDebugLine(lineFromXYZ=[0, 0, 0],
lineToXYZ=[0, 0, 0],
lineColorRGB=colorRGB,
lineWidth=lineWidth,
lifeTime=0)
pitch = 0
yaw = 0
while (p.isConnected()):
pitch += 0.01
if (pitch >= 3.1415 * 2.):
pitch = 0
yaw += 0.01
if (yaw >= 3.1415 * 2.):
yaw = 0
baseOrientationB = p.getQuaternionFromEuler([yaw, pitch, 0])
if (obB >= 0):
p.resetBasePositionAndOrientation(obB, basePositionB, baseOrientationB)
if (useCollisionShapeQuery):
pts = p.getClosestPoints(bodyA=-1,
bodyB=-1,
distance=100,
collisionShapeA=geom,
collisionShapeB=geomBox,
collisionShapePositionA=[0.5, 0, 1],
collisionShapePositionB=basePositionB,
collisionShapeOrientationB=baseOrientationB)
#pts = p.getClosestPoints(bodyA=obA, bodyB=-1, distance=100, collisionShapeB=geomBox, collisionShapePositionB=basePositionB, collisionShapeOrientationB=baseOrientationB)
else:
pts = p.getClosestPoints(bodyA=obA, bodyB=obB, distance=100)
if len(pts) > 0:
#print(pts)
distance = pts[0][8]
#print("distance=",distance)
ptA = pts[0][5]
ptB = pts[0][6]
p.addUserDebugLine(lineFromXYZ=ptA,
lineToXYZ=ptB,
lineColorRGB=colorRGB,
lineWidth=lineWidth,
lifeTime=0,
replaceItemUniqueId=lineId)
#time.sleep(1./240.)
#removeCollisionShape is optional:
#only use removeCollisionShape if the collision shape is not used to create a body
#and if you want to keep on creating new collision shapes for different queries (not recommended)
p.removeCollisionShape(geom)
p.removeCollisionShape(geomBox)
| mit | -5,667,727,714,677,863,000 | 36.013889 | 173 | 0.601876 | false |
alexisVallet/anime-character-detection | dpmDetection.py | 1 | 2469 | import cv2
import pymatlab as mlb
class DPMObjectDetection:
def __init__(self):
self.session = mlb.session_factory()
self.session.run('cd ./voc-dpm/')
self.model = None
def trainDPMmodel(self, model, pos, neg, warp, randneg, nbiter, nbnegiter,
maxnumexamples, overlap, numfp, cont, tag, C):
""" Trains a model opptimizing a WL-SSVM or LSVM.
(thin wrapper around matlab code by Girshick, R. B.)
Returns:
model The new model
Arguments
warp 1 => use warped positives
0 => use latent positives
        randneg 1 => use random negatives
0 => use hard negatives
iter The number of training iterations
negiter The number of data-mining steps within each training iteration
max_num_examples
The maximum number of negative examples that the feature vector
cache may hold
overlap The minimum overlap in latent positive search
cont True => restart training from a previous run
C Regularization/surrogate loss tradeoff parameter
"""
self.session.run('cd ./train/')
self.session.putvalue('model', model)
self.session.putvalue('pos', pos)
self.session.putvalue('neg', neg)
self.session.putvalue('warp', warp)
self.session.putvalue('randneg', randneg)
self.session.putvalue('iter', nbiter)
self.session.putvalue('negiter', nbnegiter)
        self.session.putvalue('max_num_examples', maxnumexamples)
self.session.putvalue('overlap', overlap)
self.session.putvalue('num_fp', numfp)
self.session.putvalue('cont', cont)
self.session.putvalue('tag', tag)
self.session.putvalue('C', C)
        self.session.run('nmodel = train(model,pos,neg,warp,randneg,iter,negiter,max_num_examples,overlap,num_fp,cont,tag,C)')
self.model = self.session.getvalue('nmodel')
self.session.run('cd ../')
    def detectObject(self, image, thresh, max_num):
if self.model == None:
self.session.run('cd ./features/')
self.session.putvalue('im', image)
self.session.putvalue('model', self.model)
| gpl-2.0 | -4,481,293,850,010,839,600 | 43.890909 | 131 | 0.563386 | false |
bendk/thesquirrel | thesquirrel/views.py | 1 | 1123 | # thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import render
from articles.models import Article
from docs.models import Document
def home(request):
return render(request, 'home.html', {
'top_stories': Article.objects.all()[:5],
})
def email_list_signup(request):
return render(request, 'email-list-signup.html')
def contact_us(request):
return render(request, 'contact-us.html')
| agpl-3.0 | 5,060,170,681,026,645,000 | 34.09375 | 78 | 0.749777 | false |
agroknow/aginfra | sites/all/modules/ak_cp/js/leaflet_search/tilestache_server_with_autocomplete.py | 2 | 5844 | import json
import marisa_trie
import os
import shutil
from operator import itemgetter
import TileStache
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from cartograph import Utils
from cartograph import Config
""" This is an example from a summer research project that adapted Leaflet search to work with a Tilestache tile server running on top of Werkzeug. I had to do a lot of fiddling around with it to get the autocomplete to work, so I thought the example might be helpful for other people"""
def run_server(path_cartograph_cfg, path_tilestache_cfg):
"""Server run function, you probably don't care about this part if all you care about is implementing search"""
Config.initConf(path_cartograph_cfg)
path_tilestache_cfg = os.path.abspath(path_tilestache_cfg)
path_cache = json.load(open(path_tilestache_cfg, 'r'))['cache']['path']
static_files = { '/static': os.path.join(os.path.abspath('./web')) }
if os.path.isdir(path_cache):
assert(len(path_cache) > 5)
shutil.rmtree(path_cache)
app = CartographServer(path_tilestache_cfg, Config.get())
run_simple('0.0.0.0', 8080, app, static_files=static_files)
class CartographServer(TileStache.WSGITileServer):
def __init__(self, path_cfg, cartograph_cfg):
TileStache.WSGITileServer.__init__(self, path_cfg)
self.cartoconfig = cartograph_cfg
#Reading in features from our particular dataset (Wikipedia articles that have popularities, coordinates, and names)
self.popularityDict = Utils.read_features(
self.cartoconfig.get("ExternalFiles", "names_with_id"),
self.cartoconfig.get("GeneratedFiles","popularity_with_id"))
xyDict = Utils.read_features(self.cartoconfig.get("GeneratedFiles", "article_coordinates"),
self.cartoconfig.get("ExternalFiles", "names_with_id"),
self.cartoconfig.get("GeneratedFiles", "zoom_with_id"),
required=('x', 'y', 'name', 'maxZoom'))
self.keyList = []
self.tupleLocZoom = []
self.titleLookupDict = dict()
        #Utils.read_features gives me a dict keyed by id#, this is going through it to extract and format the useful information
for entry in xyDict:
#x and y have to be flipped to get it to match up, quirk of our dataset
y = float(xyDict[entry]['x'])
x = float(xyDict[entry]['y'])
title = xyDict[entry]['name']
self.titleLookupDict[entry] = title
#zoom is the location it shows at, it's passed through the search function so that I can make the map zoom to the proper location on the JavaScript side
zoom = int(xyDict[entry]['maxZoom'])
loc = [x, y]
idnum = int(entry)
#create tuple of info and a title, add them both to separate lists (they'll get zipped together eventually)
locZoom = (x, y, zoom, idnum)
lowertitle = unicode(title.lower(), 'utf-8')
self.keyList.append(lowertitle)
self.tupleLocZoom.append(locZoom)
#after creating lists of all titles and location/zoom, zip them into a trie - extracted to json format later
fmt = "<ddii" #a tuple of double, double, int, string (x, y, zoom, regular case title)
self.trie = marisa_trie.RecordTrie(fmt, zip(self.keyList, self.tupleLocZoom))
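    # Illustrative RecordTrie behaviour (hypothetical entry): items() does the
    # prefix matching that drives the autocomplete handling in __call__ below.
    #
    #     t = marisa_trie.RecordTrie("<ddii", [(u"paris", (2.35, 48.85, 7, 42))])
    #     t.items(u"par")   # -> [(u'paris', (2.35, 48.85, 7, 42))]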
def __call__(self, environ, start_response):
path_info = environ.get('PATH_INFO', None)
# if the user tried to search for something (this is done with url routing in the server config file)
if path_info.startswith('/dynamic/search'):
request = Request(environ)
#get the thing the user searched for
title = request.args['q']
            #trie autocomplete response
results = self.trie.items(unicode(title))
#empty list to hold tuples to sort
tupleList = []
#empty list to hold json-formatted results
jsonList = []
#extract values from tuple in trie - this is needed because it's autocomplete, so there are multiple results
for item in results:
idnum = str(item[1][3])
titlestring = self.titleLookupDict[idnum]
pop = float(self.popularityDict[idnum]['popularity'])
x = item[1][0]
y = item[1][1]
locat = [x,y]
zoom = item[1][2]
itemTuple = (locat, zoom, titlestring, pop)
tupleList.append(itemTuple)
sortedTupleList = sorted(tupleList, key=itemgetter(3))
sortedTupleList.reverse()
#creating the json for each item
for item in sortedTupleList:
locat = item[0]
zoom = item[1]
titlestring = item[2]
rJsonDict = {"loc": locat, "title": titlestring, "zoom" : zoom}
jsonList.append(rJsonDict)
response = Response (json.dumps(jsonList))
response.headers['Content-type'] = 'application/json'
#returning the response to the browser!
return response(environ, start_response)
else:
return TileStache.WSGITileServer.__call__(self, environ, start_response) | gpl-2.0 | 319,583,478,758,305,400 | 44.031496 | 287 | 0.575804 | false |
cuckoobox/cuckoo | tests/test_search.py | 1 | 4842 | # Copyright (C) 2017-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import mock
import os
import tempfile
from cuckoo.common.elastic import Elastic
from cuckoo.common.mongo import Mongo, mongo
from cuckoo.common.objects import File
from cuckoo.main import cuckoo_create
from cuckoo.misc import set_cwd
from cuckoo.reporting.mongodb import MongoDB
def test_mongo_init_nouser():
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"mongodb": {
"enabled": True,
"host": "1.2.3.4",
"port": 4242,
"db": "foobar",
},
},
})
m = Mongo()
m.init()
assert m.enabled is True
assert m.hostname == "1.2.3.4"
assert m.port == 4242
assert m.database == "foobar"
assert m.username is None
assert m.password is None
def test_mongo_init_withuser():
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"mongodb": {
"enabled": True,
"username": "foo",
"password": "bar",
},
},
})
m = Mongo()
m.init()
assert m.enabled is True
assert m.hostname == "127.0.0.1"
assert m.port == 27017
assert m.database == "cuckoo"
assert m.username == "foo"
assert m.password == "bar"
@mock.patch("cuckoo.common.mongo.pymongo")
def test_mongo_connect_notenabled(p):
set_cwd(tempfile.mkdtemp())
cuckoo_create()
m = Mongo()
m.init()
m.connect()
p.MongoClient.assert_not_called()
@mock.patch("cuckoo.common.mongo.gridfs")
@mock.patch("cuckoo.common.mongo.pymongo")
def test_mongo_connect_success_nouser(p, q):
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"mongodb": {
"enabled": True,
},
},
})
m = Mongo()
m.init()
m.connect()
p.MongoClient.assert_called_once_with("127.0.0.1", 27017)
client = p.MongoClient.return_value
client.__getitem__.assert_called_once_with("cuckoo")
db = client.__getitem__.return_value
db.authenticate.assert_not_called()
q.GridFS.assert_called_once_with(db)
assert m.db == db
assert m.grid == q.GridFS.return_value
@mock.patch("cuckoo.common.mongo.gridfs")
@mock.patch("cuckoo.common.mongo.pymongo")
def test_mongo_connect_success_withuser(p, q):
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"mongodb": {
"enabled": True,
"username": "foo",
"password": "bar",
},
},
})
m = Mongo()
m.init()
m.connect()
db = p.MongoClient.return_value.__getitem__.return_value
db.authenticate.assert_called_once_with("foo", "bar")
def test_mongo_connect_store_file():
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"mongodb": {
"enabled": True,
"db": "cuckootest",
},
},
})
mongo.init()
assert mongo.database == "cuckootest"
fd, filepath = tempfile.mkstemp()
os.write(fd, "hello world")
os.close(fd)
f = File(filepath)
r = MongoDB()
r.init_once()
id1 = r.store_file(f, "foobar.txt")
id2 = r.store_file(f, "foobar.txt")
assert id1 == id2
assert mongo.db.fs.files.find_one({
"sha256": f.get_sha256(),
})["_id"] == id1
assert mongo.grid.get(id1).read() == "hello world"
def test_elastic_init():
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"elasticsearch": {
"enabled": True,
"hosts": [
"localhost",
],
"calls": True,
},
},
})
e = Elastic()
e.init()
assert e.enabled is True
assert e.hosts == ["localhost"]
assert e.calls is True
assert e.index == "cuckoo"
assert e.index_time_pattern == "yearly"
assert e.cuckoo_node is None
@mock.patch("elasticsearch.Elasticsearch")
def test_elastic_connect_notenabled(p):
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"elasticsearch": {
"enabled": False,
},
},
})
e = Elastic()
e.init()
e.connect()
p.assert_not_called()
@mock.patch("elasticsearch.Elasticsearch")
def test_elastic_connect_enabled(p):
set_cwd(tempfile.mkdtemp())
cuckoo_create(cfg={
"reporting": {
"elasticsearch": {
"enabled": True,
},
},
})
e = Elastic()
e.init()
e.connect()
p.assert_called_once_with(["127.0.0.1"], timeout=300)
| mit | 6,799,490,892,015,483,000 | 24.619048 | 68 | 0.549566 | false |
asterisk/ari-py | ari_test/utils.py | 2 | 2013 | #!/usr/bin/env python
import httpretty
import os
import unittest
import urlparse
import ari
import requests
class AriTestCase(unittest.TestCase):
"""Base class for mock ARI server.
"""
BASE_URL = "http://ari.py/ari"
def setUp(self):
"""Setup httpretty; create ARI client.
"""
super(AriTestCase, self).setUp()
httpretty.enable()
self.serve_api()
self.uut = ari.connect('http://ari.py/', 'test', 'test')
def tearDown(self):
"""Cleanup.
"""
super(AriTestCase, self).tearDown()
httpretty.disable()
httpretty.reset()
@classmethod
def build_url(cls, *args):
"""Build a URL, based off of BASE_URL, with the given args.
>>> AriTestCase.build_url('foo', 'bar', 'bam', 'bang')
'http://ari.py/ari/foo/bar/bam/bang'
:param args: URL components
:return: URL
"""
url = cls.BASE_URL
for arg in args:
url = urlparse.urljoin(url + '/', arg)
return url
def serve_api(self):
"""Register all api-docs with httpretty to serve them for unit tests.
"""
for filename in os.listdir('sample-api'):
if filename.endswith('.json'):
with open(os.path.join('sample-api', filename)) as fp:
body = fp.read()
self.serve(httpretty.GET, 'api-docs', filename, body=body)
def serve(self, method, *args, **kwargs):
"""Serve a single URL for current test.
:param method: HTTP method. httpretty.{GET,PUT,POST,DELETE}.
:param args: URL path segments.
:param kwargs: See httpretty.register_uri()
"""
url = self.build_url(*args)
if kwargs.get('body') is None and 'status' not in kwargs:
kwargs['status'] = requests.codes.no_content
httpretty.register_uri(method, url,
content_type="application/json",
**kwargs)
| bsd-3-clause | 7,237,302,397,208,412,000 | 28.602941 | 77 | 0.556384 | false |
LumaPictures/rez | src/rez/cli/complete.py | 4 | 3454 | """
Prints package completion strings.
"""
from rez.vendor import argparse
__doc__ = argparse.SUPPRESS
def setup_parser(parser, completions=False):
pass
def command(opts, parser, extra_arg_groups=None):
from rez.cli._util import subcommands, hidden_subcommands
import os
import re
# get comp info from environment variables
comp_line = os.getenv("COMP_LINE", "")
comp_point = os.getenv("COMP_POINT", "")
try:
comp_point = int(comp_point)
except:
comp_point = len(comp_line)
last_word = comp_line.split()[-1]
if comp_line.endswith(last_word):
prefix = last_word
else:
prefix = None
def _pop_arg(l, p):
words = l.split()
arg = None
if words:
arg = words[0]
l_ = l.lstrip()
p -= (len(l) - len(l_) + len(arg))
l = l_[len(arg):]
return l, p, arg
return l, p, arg
# determine subcommand, possibly give subcommand completion
subcommand = None
comp_line, comp_point, cmd = _pop_arg(comp_line, comp_point)
if cmd in ("rez", "rezolve"):
comp_line, comp_point, arg = _pop_arg(comp_line, comp_point)
if arg:
if prefix != arg:
subcommand = arg
else:
subcommand = cmd.split("-", 1)[-1]
if subcommand is None:
cmds = set(subcommands) - set(hidden_subcommands)
if prefix:
cmds = (x for x in cmds if x.startswith(prefix))
print " ".join(cmds)
if subcommand not in subcommands:
return
# replace '--' with special '--N#' flag so that subcommands can specify
# custom completions.
regex = re.compile("\s--\s")
ddashes = regex.findall(comp_line)
for i, ddash in enumerate(ddashes):
j = comp_line.find(ddash)
while comp_line[j] != "-":
j += 1
j += 2
s = "N%d" % i
comp_line = comp_line[:j] + s + comp_line[j:]
if comp_point >= j:
comp_point += len(s)
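    # e.g. "rez env -- foo -- bar" is rewritten to "rez env --N0 foo --N1 bar",
    # with comp_point shifted right whenever an insertion lands before it.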
# create parser for subcommand
from rez.backport.importlib import import_module
module_name = "rez.cli.%s" % subcommand
mod = import_module(module_name)
parser = argparse.ArgumentParser()
mod.setup_parser(parser, completions=True)
# have to massage input a little so argcomplete behaves
cmd = "rez-%s" % subcommand
comp_line = cmd + comp_line
comp_point += len(cmd)
# generate the completions
from rez.cli._complete_util import RezCompletionFinder
completer = RezCompletionFinder(parser=parser,
comp_line=comp_line,
comp_point=comp_point)
words = completer.completions
print ' '.join(words)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -5,884,535,343,274,367,000 | 29.839286 | 79 | 0.611754 | false |
daviddupont69/CouchPotatoServer | libs/rsa/transform.py | 216 | 6848 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Data transformation functions.
From bytes to a number, number to bytes, etc.
'''
from __future__ import absolute_import
try:
# We'll use psyco if available on 32-bit architectures to speed up code.
# Using psyco (if available) cuts down the execution time on Python 2.5
# at least by half.
import psyco
psyco.full()
except ImportError:
pass
import binascii
from struct import pack
from rsa import common
from rsa._compat import is_integer, b, byte, get_word_alignment, ZERO_BYTE, EMPTY_BYTE
def bytes2int(raw_bytes):
r'''Converts a list of bytes or an 8-bit string to an integer.
    When using unicode strings, encode them to a byte encoding such as UTF-8 first.
>>> (((128 * 256) + 64) * 256) + 15
8405007
>>> bytes2int('\x80@\x0f')
8405007
'''
return int(binascii.hexlify(raw_bytes), 16)
def _int2bytes(number, block_size=None):
r'''Converts a number to a string of bytes.
Usage::
>>> _int2bytes(123456789)
'\x07[\xcd\x15'
>>> bytes2int(_int2bytes(123456789))
123456789
>>> _int2bytes(123456789, 6)
'\x00\x00\x07[\xcd\x15'
>>> bytes2int(_int2bytes(123456789, 128))
123456789
>>> _int2bytes(123456789, 3)
Traceback (most recent call last):
...
OverflowError: Needed 4 bytes for number, but block size is 3
@param number: the number to convert
@param block_size: the number of bytes to output. If the number encoded to
bytes is less than this, the block will be zero-padded. When not given,
the returned block is not padded.
@throws OverflowError when block_size is given and the number takes up more
bytes than fit into the block.
'''
# Type checking
if not is_integer(number):
raise TypeError("You must pass an integer for 'number', not %s" %
number.__class__)
if number < 0:
raise ValueError('Negative numbers cannot be used: %i' % number)
# Do some bounds checking
if number == 0:
needed_bytes = 1
raw_bytes = [ZERO_BYTE]
else:
needed_bytes = common.byte_size(number)
raw_bytes = []
# You cannot compare None > 0 in Python 3x. It will fail with a TypeError.
if block_size and block_size > 0:
if needed_bytes > block_size:
raise OverflowError('Needed %i bytes for number, but block size '
'is %i' % (needed_bytes, block_size))
# Convert the number to bytes.
while number > 0:
raw_bytes.insert(0, byte(number & 0xFF))
number >>= 8
# Pad with zeroes to fill the block
if block_size and block_size > 0:
padding = (block_size - needed_bytes) * ZERO_BYTE
else:
padding = EMPTY_BYTE
return padding + EMPTY_BYTE.join(raw_bytes)
def bytes_leading(raw_bytes, needle=ZERO_BYTE):
'''
Finds the number of prefixed byte occurrences in the haystack.
Useful when you want to deal with padding.
:param raw_bytes:
Raw bytes.
:param needle:
The byte to count. Default \000.
:returns:
The number of leading needle bytes.
'''
leading = 0
# Indexing keeps compatibility between Python 2.x and Python 3.x
_byte = needle[0]
for x in raw_bytes:
if x == _byte:
leading += 1
else:
break
return leading
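# e.g. bytes_leading(b'\x00\x00\x07[') == 2 -- two leading zero padding bytes.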
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
'''
Convert an unsigned integer to bytes (base-256 representation)::
Does not preserve leading zeros if you don't specify a chunk size or
fill size.
.. NOTE:
You must not specify both fill_size and chunk_size. Only one
of them is allowed.
:param number:
Integer value
:param fill_size:
If the optional fill size is given the length of the resulting
byte string is expected to be the fill size and will be padded
with prefix zero bytes to satisfy that length.
:param chunk_size:
If optional chunk size is given and greater than zero, pad the front of
the byte string with binary zeros so that the length is a multiple of
``chunk_size``.
:param overflow:
``False`` (default). If this is ``True``, no ``OverflowError``
will be raised when the fill_size is shorter than the length
of the generated byte sequence. Instead the byte sequence will
be returned as is.
:returns:
Raw bytes (base-256 representation).
:raises:
``OverflowError`` when fill_size is given and the number takes up more
bytes than fit into the block. This requires the ``overflow``
argument to this function to be set to ``False`` otherwise, no
error will be raised.
'''
if number < 0:
raise ValueError("Number must be an unsigned integer: %d" % number)
if fill_size and chunk_size:
raise ValueError("You can either fill or pad chunks, but not both")
# Ensure these are integers.
number & 1
raw_bytes = b('')
# Pack the integer one machine word at a time into bytes.
num = number
word_bits, _, max_uint, pack_type = get_word_alignment(num)
pack_format = ">%s" % pack_type
while num > 0:
raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
num >>= word_bits
# Obtain the index of the first non-zero byte.
zero_leading = bytes_leading(raw_bytes)
if number == 0:
raw_bytes = ZERO_BYTE
# De-padding.
raw_bytes = raw_bytes[zero_leading:]
length = len(raw_bytes)
if fill_size and fill_size > 0:
if not overflow and length > fill_size:
raise OverflowError(
"Need %d bytes for number, but fill size is %d" %
(length, fill_size)
)
raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
elif chunk_size and chunk_size > 0:
remainder = length % chunk_size
if remainder:
padding_size = chunk_size - remainder
raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
return raw_bytes
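# Illustrative round trip (values chosen to match the doctests above):
#
#     int2bytes(123456789, fill_size=6)   # -> '\x00\x00\x07[\xcd\x15'
#     bytes2int(int2bytes(123456789))     # -> 123456789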
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | -3,417,576,358,140,375,000 | 30.122727 | 86 | 0.631517 | false |
maxamillion/atomic-reactor | atomic_reactor/plugins/pre_inject_parent_image.py | 3 | 5041 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
from atomic_reactor.build import ImageName
from atomic_reactor.koji_util import create_koji_session
from atomic_reactor.plugin import PreBuildPlugin
from atomic_reactor.plugins.exit_remove_built_image import defer_removal
from osbs.utils import graceful_chain_get
class InjectParentImage(PreBuildPlugin):
"""
Modifies parent image to be used based on given Koji build.
It first attempts to find the list of available repositories
from '.extra.image.index.pull' in Koji build information. If
not found, the first archive in Koji build that defines a non-empty
'.extra.docker.repositories' list is used.
This list provides the pull reference for the container image
associated with Koji build. If it contains multiple item, the
manifest digest, @sha256, is preferred. Otherwise, the first
repository in list is used.
The namespace and repository for the new parent image must match
the namespace and repository for the parent image defined in
Dockerfile.
This plugin returns the identifier of the Koji build used.
"""
key = 'inject_parent_image'
is_allowed_to_fail = False
def __init__(self, tasker, workflow, koji_parent_build, koji_hub, koji_ssl_certs_dir=None):
"""
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param koji_parent_build: str, either Koji build ID or Koji build NVR
:param koji_hub: str, koji hub (xmlrpc)
:param koji_ssl_certs_dir: str, path to "cert", "ca", and "serverca"
used when Koji's identity certificate is not trusted
"""
super(InjectParentImage, self).__init__(tasker, workflow)
koji_auth_info = None
if koji_ssl_certs_dir:
koji_auth_info = {
'ssl_certs_dir': koji_ssl_certs_dir,
}
self.koji_session = create_koji_session(koji_hub, koji_auth_info)
try:
self.koji_parent_build = int(koji_parent_build)
except ValueError:
self.koji_parent_build = koji_parent_build
self._koji_parent_build_info = None
self._repositories = None
self._new_parent_image = None
def run(self):
self.find_repositories()
self.select_new_parent_image()
self.validate_new_parent_image()
self.set_new_parent_image()
return self._koji_parent_build_info['id']
def find_repositories(self):
self._repositories = (self.find_repositories_from_build() or
self.find_repositories_from_archive())
if not self._repositories:
raise RuntimeError('A suitable archive for Koji build {[nvr]} was not found'
.format(self._koji_parent_build_info))
def find_repositories_from_build(self):
self._koji_parent_build_info = self.koji_session.getBuild(self.koji_parent_build)
if not self._koji_parent_build_info:
raise RuntimeError('Koji build, {}, not found'.format(self.koji_parent_build))
repositories = graceful_chain_get(self._koji_parent_build_info,
'extra', 'image', 'index', 'pull')
if repositories:
self.log.info('Using repositories from build info')
return repositories
def find_repositories_from_archive(self):
for archive in self.koji_session.listArchives(self._koji_parent_build_info['id']):
repositories = graceful_chain_get(archive, 'extra', 'docker', 'repositories')
if repositories:
self.log.info('Using repositories from archive %d', archive['id'])
return repositories
return None
def select_new_parent_image(self):
for repository in self._repositories:
if '@' in repository:
self._new_parent_image = repository
break
        # v2 manifest digest not found; just pick the first one.
if not self._new_parent_image:
self._new_parent_image = self._repositories[0]
self.log.info('New parent image is %s', self._new_parent_image)
def validate_new_parent_image(self):
current_repo = self.workflow.builder.base_image.to_str(registry=False, tag=False)
new_repo = ImageName.parse(self._new_parent_image).to_str(registry=False, tag=False)
if current_repo != new_repo:
raise RuntimeError(
'Repository for new parent image, {}, differs from '
'repository for existing parent image, {}'.format(new_repo, current_repo))
def set_new_parent_image(self):
self.workflow.builder.set_base_image(self._new_parent_image)
defer_removal(self.workflow, self._new_parent_image)
| bsd-3-clause | 7,412,242,715,765,343,000 | 38.692913 | 95 | 0.645507 | false |
handroissuazo/tensorflow | tensorflow/python/ops/gradient_checker_test.py | 18 | 11199 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.test.compute_gradient and tf.compute_gradient_error."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@ops.RegisterGradient("BadGrad")
def _bad_grad(unused_op, grad):
"""A gradient that returns the wrong shape."""
return array_ops.transpose(grad)
@ops.RegisterGradient("NaNGrad")
def _nan_grad(unused_op, grad):
"""A gradient that returns NaN."""
return np.nan * grad
class GradientCheckerTest(test.TestCase):
def testAddSimple(self):
np.random.seed(1) # Fix seed to avoid flakiness
with self.test_session(use_gpu=False):
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
y = math_ops.add(x1, x2, name="y")
# checking gradients for x1
error = gradient_checker.compute_gradient_error(x1, size, y, size)
tf_logging.info("x1 error = %f", error)
assert error < 1e-4
def testAddSimpleGPU(self):
np.random.seed(2) # Fix seed to avoid flakiness
with self.test_session(use_gpu=True):
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(2.0, shape=size, name="x1")
x2 = constant_op.constant(3.0, shape=size, name="x2")
y = math_ops.add(x1, x2, name="y")
# checking gradients for x1
error = gradient_checker.compute_gradient_error(x1, size, y, size)
tf_logging.info("x1 error = %f", error)
assert error < 1e-4
def testAddCustomized(self):
np.random.seed(3) # Fix seed to avoid flakiness
with self.test_session():
# a test case for Add operation
size = (2, 3)
x1 = constant_op.constant(
2.0, shape=size, dtype=dtypes.float64, name="x1")
x2 = constant_op.constant(
3.0, shape=size, dtype=dtypes.float64, name="x2")
y = math_ops.add(x1, x2, name="y")
      # checking gradients for x2 using a special init_value and delta
x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
error = gradient_checker.compute_gradient_error(
x2, size, y, size, x_init_value=x_init_value, delta=1e-2)
tf_logging.info("x2 error = %f", error)
assert error < 1e-10
def testGather(self):
np.random.seed(4) # Fix seed to avoid flakiness
with self.test_session():
p_shape = (4, 2)
p_size = 8
index_values = [1, 3]
y_shape = [2, 2]
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
error = gradient_checker.compute_gradient_error(params, p_shape, y,
y_shape)
tf_logging.info("gather error = %f", error)
assert error < 1e-4
def testNestedGather(self):
np.random.seed(5) # Fix seed to avoid flakiness
with self.test_session():
p_shape = (8, 2)
p_size = 16
index_values = [1, 3, 5, 6]
index_values2 = [0, 2]
y2_shape = [2, 2]
params = constant_op.constant(
np.arange(p_size).astype(np.float), shape=p_shape, name="p")
indices = constant_op.constant(index_values, name="i")
y = array_ops.gather(params, indices, name="y")
indices2 = constant_op.constant(index_values2, name="i2")
y2 = array_ops.gather(y, indices2, name="y2")
error = gradient_checker.compute_gradient_error(params, p_shape, y2,
y2_shape)
tf_logging.info("nested gather error = %f", error)
assert error < 1e-4
def testComplexMul(self):
with self.test_session():
size = ()
c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
y = c * x
analytical, numerical = gradient_checker.compute_gradient(x, size, y,
size)
correct = np.array([[5, 7], [-7, 5]])
self.assertAllEqual(correct, analytical)
self.assertAllClose(correct, numerical, rtol=1e-4)
self.assertLess(
gradient_checker.compute_gradient_error(x, size, y, size), 2e-4)
def testComplexConj(self):
with self.test_session():
size = ()
x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
y = math_ops.conj(x)
analytical, numerical = gradient_checker.compute_gradient(x, size, y,
size)
correct = np.array([[1, 0], [0, -1]])
self.assertAllEqual(correct, analytical)
self.assertAllClose(correct, numerical, rtol=3e-6)
self.assertLess(
gradient_checker.compute_gradient_error(x, size, y, size), 2e-5)
def testEmptySucceeds(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
for grad in gradient_checker.compute_gradient(x, (0, 3), y, (0, 3)):
self.assertEqual(grad.shape, (0, 0))
error = gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
self.assertEqual(error, 0)
def testEmptyFails(self):
with ops.Graph().as_default() as g:
with self.test_session(graph=g):
x = array_ops.placeholder(dtypes.float32)
with g.gradient_override_map({"Identity": "BadGrad"}):
y = array_ops.identity(x)
bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
with self.assertRaisesRegexp(ValueError, bad):
gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
with self.assertRaisesRegexp(ValueError, bad):
gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
def testNaNGradFails(self):
with ops.Graph().as_default() as g:
with self.test_session(graph=g):
x = array_ops.placeholder(dtypes.float32)
with g.gradient_override_map({"Identity": "NaNGrad"}):
y = array_ops.identity(x)
error = gradient_checker.compute_gradient_error(x, (), y, ())
# Typical test would assert error < max_err, so assert this test would
# raise AssertionError, since NaN is not < 1.0.
with self.assertRaisesRegexp(AssertionError, "False is not true"):
self.assertTrue(error < 1.0)
class MiniMNISTTest(test.TestCase):
# Gradient checker for MNIST.
def _BuildAndTestMiniMNIST(self, param_index, tag):
# Fix seed to avoid occasional flakiness
np.random.seed(6)
# Hyperparameters
batch = 3
inputs = 16
features = 32
classes = 10
# Define the parameters
inp_data = np.random.random_sample(inputs * batch)
hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
hidden_bias_data = np.random.random_sample(features)
sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
sm_bias_data = np.random.random_sample(classes)
# special care for labels since they need to be normalized per batch
label_data = np.random.random(batch * classes).reshape((batch, classes))
s = label_data.sum(axis=1)
label_data /= s[:, None]
with self.test_session(use_gpu=True):
# We treat the inputs as "parameters" here
inp = constant_op.constant(
inp_data.tolist(),
shape=[batch, inputs],
dtype=dtypes.float64,
name="inp")
hidden_weight = constant_op.constant(
hidden_weight_data.tolist(),
shape=[inputs, features],
dtype=dtypes.float64,
name="hidden_weight")
hidden_bias = constant_op.constant(
hidden_bias_data.tolist(),
shape=[features],
dtype=dtypes.float64,
name="hidden_bias")
softmax_weight = constant_op.constant(
sm_weight_data.tolist(),
shape=[features, classes],
dtype=dtypes.float64,
name="softmax_weight")
softmax_bias = constant_op.constant(
sm_bias_data.tolist(),
shape=[classes],
dtype=dtypes.float64,
name="softmax_bias")
# List all the parameter so that we can test them one at a time
all_params = [
inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
]
param_sizes = [
[batch, inputs], # inp
[inputs, features], # hidden_weight,
[features], # hidden_bias
[features, classes], # softmax_weight,
[classes]
] # softmax_bias
# Now, Building MNIST
features = nn_ops.relu(
nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
logits = nn_ops.xw_plus_b(
features, softmax_weight, softmax_bias, name="logits")
labels = constant_op.constant(
label_data.tolist(),
shape=[batch, classes],
dtype=dtypes.float64,
name="labels")
cost = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name="cost")
# Test the gradients.
err = gradient_checker.compute_gradient_error(
all_params[param_index],
param_sizes[param_index],
cost, [batch],
delta=1e-5)
tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
return err
def testInputGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)
def testHiddenWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)
def testHiddenBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)
def testSoftmaxWeightGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)
def testSoftmaxBiasGradient(self):
self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
if __name__ == "__main__":
test.main()
| apache-2.0 | 6,003,311,263,979,053,000 | 36.454849 | 80 | 0.626038 | false |
geosharath/sm | drivers/mpath_dmp.py | 4 | 11094 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import util
import xs_errors
import statvfs
import stat
import iscsilib
import mpath_cli
import os
import glob
import time
import scsiutil
import mpp_luncheck
import mpp_mpathutil
import devscan
import re
import wwid_conf
iscsi_mpath_file = "/etc/iscsi/iscsid-mpath.conf"
iscsi_default_file = "/etc/iscsi/iscsid-default.conf"
iscsi_file = "/etc/iscsi/iscsid.conf"
DMPBIN = "/sbin/multipath"
DEVMAPPERPATH = "/dev/mapper"
DEVBYIDPATH = "/dev/disk/by-id"
DEVBYSCSIPATH = "/dev/disk/by-scsibus"
DEVBYMPPPATH = "/dev/disk/by-mpp"
SYSFS_PATH='/sys/class/scsi_host'
UMPD_PATH='/var/run/updatempppathd.py.pid'
MP_INUSEDIR = "/dev/disk/mpInuse"
MPPGETAIDLNOBIN = "/opt/xensource/bin/xe-get-arrayid-lunnum"
def _is_mpath_daemon_running():
cmd = ["/sbin/pidof", "-s", "/sbin/multipathd"]
(rc,stdout,stderr) = util.doexec(cmd)
return (rc==0)
def _is_mpp_daemon_running():
#cmd = ["/sbin/pidof", "-s", "/opt/xensource/sm/updatempppathd.py"]
#(rc,stdout,stderr) = util.doexec(cmd)
if os.path.exists(UMPD_PATH):
return True
else:
return False
def activate_MPdev(sid, dst):
if not os.path.exists(MP_INUSEDIR):
os.mkdir(MP_INUSEDIR)
if (mpp_luncheck.is_RdacLun(sid)):
suffix = get_TargetID_LunNUM(sid)
sid_with_suffix = sid + "-" + suffix
path = os.path.join(MP_INUSEDIR, sid_with_suffix)
else:
path = os.path.join(MP_INUSEDIR, sid)
cmd = ['ln', '-sf', dst, path]
util.pread2(cmd)
def deactivate_MPdev(sid):
if (mpp_luncheck.is_RdacLun(sid)):
pathlist = glob.glob('/dev/disk/mpInuse/%s-*' % sid)
path = pathlist[0]
else:
path = os.path.join(MP_INUSEDIR, sid)
if os.path.exists(path):
os.unlink(path)
def reset(sid,explicit_unmap=False,delete_nodes=False):
util.SMlog("Resetting LUN %s" % sid)
if (mpp_luncheck.is_RdacLun(sid)):
_resetMPP(sid,explicit_unmap)
else:
_resetDMP(sid,explicit_unmap,delete_nodes)
def _resetMPP(sid,explicit_unmap):
deactivate_MPdev(sid)
return
def _delete_node(dev):
try:
path = '/sys/block/' + dev + '/device/delete'
f = os.open(path, os.O_WRONLY)
os.write(f,'1')
os.close(f)
except:
util.SMlog("Failed to delete %s" % dev)
def _resetDMP(sid,explicit_unmap=False,delete_nodes=False):
# If mpath has been turned on since the sr/vdi was attached, we
# might be trying to unmap it before the daemon has been started
# This is unnecessary (and will fail) so just return.
deactivate_MPdev(sid)
if not _is_mpath_daemon_running():
util.SMlog("Warning: Trying to unmap mpath device when multipathd not running")
return
# If the multipath daemon is running, but we were initially plugged
# with multipathing set to no, there may be no map for us in the multipath
# tables. In that case, list_paths will return [], but remove_map might
# throw an exception. Catch it and ignore it.
if explicit_unmap:
util.SMlog("Explicit unmap")
# Remove map from conf file, if any
try:
wwid_conf.edit_wwid(sid, True)
except:
util.SMlog("WARNING: exception raised while attempting to"
" modify multipath.conf")
try:
mpath_cli.reconfigure()
except:
util.SMlog("WARNING: exception raised while attempting to"
" reconfigure")
time.sleep(5)
devices = mpath_cli.list_paths(sid)
try:
mpath_cli.remove_map(sid)
except:
util.SMlog("Warning: Removing the path failed")
pass
for device in devices:
mpath_cli.remove_path(device)
if delete_nodes:
_delete_node(device)
else:
mpath_cli.ensure_map_gone(sid)
path = "/dev/mapper/%s" % sid
if not util.wait_for_nopath(path, 10):
util.SMlog("MPATH: WARNING - path did not disappear [%s]" % path)
else:
util.SMlog("MPATH: path disappeared [%s]" % path)
# expecting e.g. ["/dev/sda","/dev/sdb"] or ["/dev/disk/by-scsibus/...whatever" (links to the real devices)]
def __map_explicit(devices):
for device in devices:
realpath = os.path.realpath(device)
base = os.path.basename(realpath)
util.SMlog("Adding mpath path '%s'" % base)
try:
mpath_cli.add_path(base)
except:
util.SMlog("WARNING: exception raised while attempting to add path %s" % base)
def map_by_scsibus(sid,npaths=0):
    # Synchronously creates/refreshes the MP map for a single SCSIid.
# Gathers the device vector from /dev/disk/by-scsibus - we expect
# there to be 'npaths' paths
util.SMlog("map_by_scsibus: sid=%s" % sid)
devices = []
# Wait for up to 60 seconds for n devices to appear
for attempt in range(0,60):
devices = scsiutil._genReverseSCSIidmap(sid)
# If we've got the right number of paths, or we don't know
# how many devices there ought to be, tell multipathd about
# the paths, and return.
if(len(devices)>=npaths or npaths==0):
# Enable this device's sid: it could be blacklisted
# We expect devices to be blacklisted according to their
# wwid only. Checking the first one is sufficient
if wwid_conf.is_blacklisted(devices[0]):
try:
wwid_conf.edit_wwid(sid)
except:
util.SMlog("WARNING: exception raised while attempting to"
" modify multipath.conf")
try:
mpath_cli.reconfigure()
except:
util.SMlog("WARNING: exception raised while attempting to"
" reconfigure")
time.sleep(5)
__map_explicit(devices)
return
time.sleep(1)
__map_explicit(devices)
def refresh(sid,npaths):
# Refresh the multipath status
util.SMlog("Refreshing LUN %s" % sid)
if len(sid):
path = DEVBYIDPATH + "/scsi-" + sid
if not os.path.exists(path):
scsiutil.rescan(scsiutil._genHostList(""))
if not util.wait_for_path(path,60):
raise xs_errors.XenError('Device not appeared yet')
if not (mpp_luncheck.is_RdacLun(sid)):
_refresh_DMP(sid,npaths)
else:
_refresh_MPP(sid,npaths)
else:
raise xs_errors.XenError('MPath not written yet')
def _refresh_DMP(sid, npaths):
map_by_scsibus(sid,npaths)
path = os.path.join(DEVMAPPERPATH, sid)
util.wait_for_path(path, 10)
if not os.path.exists(path):
raise xs_errors.XenError('DMP failed to activate mapper path')
lvm_path = "/dev/disk/by-scsid/"+sid+"/mapper"
util.wait_for_path(lvm_path, 10)
activate_MPdev(sid, path)
def _refresh_MPP(sid, npaths):
path = os.path.join(DEVBYMPPPATH,"%s" % sid)
mpppath = glob.glob(path)
if not len(mpppath):
raise xs_errors.XenError('MPP RDAC activate failed to detect mpp path')
activate_MPdev(sid,mpppath[0])
def activate():
util.SMlog("MPATH: multipath activate called")
cmd = ['ln', '-sf', iscsi_mpath_file, iscsi_file]
try:
if os.path.exists(iscsi_mpath_file):
# Only do this if using our customized open-iscsi package
util.pread2(cmd)
except util.CommandException, ce:
if not ce.reason.endswith(': File exists'):
raise
    # If we've got no active sessions, and the daemon is already running,
# we're ok to restart the daemon
if iscsilib.is_iscsi_daemon_running():
if not iscsilib._checkAnyTGT():
iscsilib.restart_daemon()
# Start the updatempppathd daemon
if not _is_mpp_daemon_running():
cmd = ["service", "updatempppathd", "start"]
util.pread2(cmd)
if not _is_mpath_daemon_running():
util.SMlog("Warning: multipath daemon not running. Starting daemon!")
cmd = ["service", "multipathd", "start"]
util.pread2(cmd)
for i in range(0,120):
if mpath_cli.is_working():
util.SMlog("MPATH: dm-multipath activated.")
return
time.sleep(1)
util.SMlog("Failed to communicate with the multipath daemon!")
raise xs_errors.XenError('MultipathdCommsFailure')
def deactivate():
util.SMlog("MPATH: multipath deactivate called")
cmd = ['ln', '-sf', iscsi_default_file, iscsi_file]
if os.path.exists(iscsi_default_file):
# Only do this if using our customized open-iscsi package
util.pread2(cmd)
# Stop the updatempppathd daemon
if _is_mpp_daemon_running():
cmd = ["service", "updatempppathd", "stop"]
util.pread2(cmd)
if _is_mpath_daemon_running():
# Flush the multipath nodes
for sid in mpath_cli.list_maps():
reset(sid,True)
# Disable any active MPP LUN maps (except the root dev)
systemroot = os.path.realpath(util.getrootdev())
for dev in glob.glob(DEVBYMPPPATH + "/*"):
if os.path.realpath(dev) != systemroot:
sid = os.path.basename(dev).split('-')[0]
reset(sid)
else:
util.SMlog("MPP: Found root dev node, not resetting")
# Check the ISCSI daemon doesn't have any active sessions, if not,
# restart in the new mode
if iscsilib.is_iscsi_daemon_running() and not iscsilib._checkAnyTGT():
iscsilib.restart_daemon()
util.SMlog("MPATH: multipath deactivated.")
def path(SCSIid):
if _is_mpath_daemon_running():
if (mpp_luncheck.is_RdacLun(SCSIid)):
pathlist = glob.glob('/dev/disk/mpInuse/%s-*' % SCSIid)
util.SMlog("pathlist is:")
util.SMlog(pathlist)
if len(pathlist):
path = pathlist[0]
else:
path = os.path.join(MP_INUSEDIR, SCSIid)
else:
path = os.path.join(MP_INUSEDIR, SCSIid)
return path
else:
return DEVBYIDPATH + "/scsi-" + SCSIid
def status(SCSIid):
pass
def get_TargetID_LunNUM(SCSIid):
devices = scsiutil._genReverseSCSIidmap(SCSIid)
cmd = [MPPGETAIDLNOBIN, devices[0]]
return util.pread2(cmd).split('\n')[0]
| lgpl-2.1 | -119,632,054,546,002,830 | 32.720365 | 108 | 0.618262 | false |
baris/fullerite | src/diamond/collectors/ipmisensor/ipmisensor.py | 23 | 4467 | # coding=utf-8
"""
This collector uses the [ipmitool](http://openipmi.sourceforge.net/) to read
hardware sensors from servers
using the Intelligent Platform Management Interface (IPMI). IPMI is very common
with server hardware but usually not available in consumer hardware.
#### Dependencies
* [ipmitool](http://openipmi.sourceforge.net/)
"""
import diamond.collector
from subprocess import Popen, PIPE
import os
import getpass
class IPMISensorCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(IPMISensorCollector,
self).get_default_config_help()
config_help.update({
'bin': 'Path to the ipmitool binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
'thresholds': 'Collect thresholds as well as reading'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(IPMISensorCollector, self).get_default_config()
config.update({
'bin': '/usr/bin/ipmitool',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'path': 'ipmi.sensors',
'thresholds': False,
})
return config
def parse_value(self, value):
"""
Convert value string to float for reporting
"""
value = value.strip()
# Skip missing sensors
if value == 'na':
return None
# Try just getting the float value
try:
return float(value)
except:
pass
# Next best guess is a hex value
try:
return float.fromhex(value)
except:
pass
# No luck, bail
return None
def collect(self):
if (not os.access(self.config['bin'], os.X_OK)
or (self.config['use_sudo']
and not os.access(self.config['sudo_cmd'], os.X_OK))):
return False
command = [self.config['bin'], 'sensor']
if self.config['use_sudo'] and getpass.getuser() != 'root':
command.insert(0, self.config['sudo_cmd'])
p = Popen(command, stdout=PIPE).communicate()[0][:-1]
for i, v in enumerate(p.split("\n")):
data = v.split("|")
try:
# Complex keys are fun!
metric_name = data[0].strip().replace(".",
"_").replace(" ", ".")
metrics = []
                # Each sensor line is a set of columns separated by a |
                # with the following descriptions:
# 1. Sensor ID
# 2. Sensor Reading
# 3. Units
# 4. Status
# 5. Lower Non-Recoverable
# 6. Lower Critical
# 7. Lower Non-Critical
# 8. Upper Non-Critical
# 9. Upper Critical
# 10. Upper Non-Recoverable
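                # A hypothetical sample line (illustrative only; real
                # output varies by hardware and ipmitool version):
                #   CPU1 Temp | 42.000 | degrees C | ok | na | 5.000 | 10.000 | 85.000 | 90.000 | 95.000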
if not self.config['thresholds']:
metrics.append((metric_name, self.parse_value(data[1])))
else:
metrics.append((metric_name + ".Reading",
self.parse_value(data[1])))
metrics.append((metric_name + ".Lower.NonRecoverable",
self.parse_value(data[4])))
metrics.append((metric_name + ".Lower.Critical",
self.parse_value(data[5])))
metrics.append((metric_name + ".Lower.NonCritical",
self.parse_value(data[6])))
metrics.append((metric_name + ".Upper.NonCritical",
self.parse_value(data[7])))
metrics.append((metric_name + ".Upper.Critical",
self.parse_value(data[8])))
metrics.append((metric_name + ".Upper.NonRecoverable",
self.parse_value(data[9])))
[self.publish(name, value)
for (name, value) in metrics
if value is not None]
except ValueError:
continue
except IndexError:
continue
return True
| apache-2.0 | 8,484,804,694,733,016,000 | 32.335821 | 79 | 0.488023 | false |
lpramuk/robottelo | robottelo/cli/puppet.py | 1 | 1998 | # -*- encoding: utf-8 -*-
"""
Usage::
hammer puppet-class [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
info Show a puppetclass
list List all puppetclasses.
sc-params List all smart class parameters
smart-variables List all smart variables
"""
from robottelo.cli.base import Base
class Puppet(Base):
"""
Search Foreman's puppet modules.
"""
command_base = 'puppet-class'
@classmethod
def sc_params(cls, options=None):
"""
Usage:
hammer puppet-class sc-params [OPTIONS]
Options:
--order ORDER sort results
--page PAGE paginate results
--per-page PER_PAGE number of entries per request
--puppet-class PUPPET_CLASS_NAME Puppet class name
--puppet-class-id PUPPET_CLASS_ID ID of Puppet class
--search SEARCH filter results
"""
cls.command_sub = 'sc-params'
return cls.execute(cls._construct_command(options), output_format='csv')
@classmethod
def smart_variables(cls, options=None):
"""
Usage:
hammer puppet-class smart-variables [OPTIONS]
Options:
--order ORDER sort results
--page PAGE paginate results
--per-page PER_PAGE number of entries per request
--puppet-class PUPPET_CLASS_NAME Puppet class name
--puppet-class-id PUPPET_CLASS_ID ID of Puppet class
--search SEARCH filter results
"""
cls.command_sub = 'smart-variables'
return cls.execute(cls._construct_command(options), output_format='csv')
| gpl-3.0 | -6,313,773,293,266,725,000 | 31.754098 | 80 | 0.52002 | false |
santiag0aragon/pgp_dongle | pgp_wrapper.py | 1 | 11716 | import os
import gnupg
import socket
import struct
import re
import sys
import getpass
from socket_utils import *
# For the generation of the key you may want to run
# sudo rngd -r /dev/urandom
# to generate randomnes
class PGP:
def __init__(self, path, email=None, verbose=False, pass_phrase=None):
self.DEF_SERVER = 'pgp.mit.edu'
self.pgp = gnupg.GPG(gnupghome=path)
self.pgp.verbose = verbose
self.email = email
#self.kid is only used after calling load_key
# (self.kid, self.fingerprint) = load_key(self.email)
self.fingerprint = self.load_key(self.email)
if self.fingerprint is None:
if pass_phrase is not None:
                print 'No key pair found for this email. Generating new key pair...'
self.gen_key_pair(pass_phrase, self.email)
self.key_not_uploaded = True
            else:
print 'To generate the key a passphrase is needed'
sys.exit(1)
else:
self.key_not_uploaded = False
# self.send_key(self.fingerprint)
# os.chmod(path, 0x1C0)
'''
returns the last match in the list_keys() dict
'''
def load_key(self, email, private=True):
kdict = self.pgp.list_keys(private)
kid = None
fp = None
for k in kdict:
if email in str(k['uids'][0]):
# kid = str(k['keyid'][0])
fp = str(k['fingerprint'])
# return (kid, fp)
return fp
def gen_key_pair(self, pass_phrase, email=None):
if email != None:
self.email = email
input_data = self.pgp.gen_key_input(name_email=self.email, passphrase=pass_phrase)
key = self.pgp.gen_key(input_data)
self.fingerprint = key.fingerprint
return key
def export_kp_to_file(self, key_fingerprint, file_name):
ascii_armored_public_keys = self.pgp.export_keys(key_fingerprint)
ascii_armored_private_keys = self.pgp.export_keys(key_fingerprint, True)
with open('%s.asc' % file_name, 'w') as f:
f.write(ascii_armored_public_keys)
f.write(ascii_armored_private_keys)
def import_kp_from_file(self, file_name):
key_data = open(file_name).read()
import_result = self.pgp.import_keys(key_data)
print 'Found %d keys with ids:\n' % import_result.count
for id in import_result.fingerprints:
print id
return import_result
'''
recipients: list of fingerprints or one fingerprint
'''
def encrypt_sign_str(self, plain_text, recipients,
sign_key_id=None, sign_passphase=None,
alwaystrust=False):
if sign_passphase is None or sign_key_id is None:
enc = self.pgp.encrypt(plain_text, recipients,
always_trust=alwaystrust)
else:
enc = self.pgp.encrypt(plain_text, recipients,
sign=sign_key_id,
passphrase=sign_passphase,
always_trust=alwaystrust)
if enc.ok is True:
return str(enc)
else:
return enc.stderr
'''
file: should be open in rb mode i.e., file = open('filename.txt','rb')
'''
def encrypt_sign_file(self, file, output_file_name,
recipients, sign_key_id=None,
sign_passphase=None, alwaystrust=False):
enc = self.pgp.encrypt_file(file, recipients,
sign=sign_key_id,
passphrase=sign_passphase,
output=output_file_name,
always_trust=alwaystrust)
if enc.ok is True:
return str(enc)
else:
return enc.stderr
def decrypt_str(self, encrypted_string, pass_phrase):
dec = self.pgp.decrypt(encrypted_string,
passphrase=pass_phrase)
print dec.trust_text
if dec.ok is True:
return dec.data
else:
return dec.stderr
def decrypt_file(self, file, output_file_name,
pass_phrase):
dec = self.pgp.decrypt_file(file, passphrase=pass_phrase,
output=output_file_name)
if dec.ok is True:
return dec.data
else:
return dec.stderr
def sign_str(self, message, pass_phrase):
return self.pgp.sign(message, keyid=self.fingerprint, passphrase=pass_phrase)
def vrf_str(self, sign):
return self.pgp.verify(sign).valid
def local_search(self, email):
kdict = self.pgp.list_keys(False)
kid = None
fp = None
for k in kdict:
if email in str(k['uids'][0]):
fp = str(k['fingerprint'])
                print 'Pub key found locally!'
return fp
# def search_key(self, email, connection, key_server=None):
# kid = self.local_search(email)
# if kid is None:
# if key_server is None:
# # key_server = 'hkps.pool.sks-keyservers.net'
# key_server = self.get_default_server()
# key = self.pgp.search_keys(email, key_server)
# if len(key) > 0:
# for k in key:
# # print k['uids'][0]
# if email in str(k['uids'][0]):
# print 'Imporing pub for %s' % str(k['uids'][0])
# kid = k['keyid']
# self.pgp.recv_keys(self.DEF_SERVER, kid)
# return kid
def get_default_server(self):
return self.DEF_SERVER
# def send_key(self, kid, server=None):
# if server is None:
# server = self.get_default_server()
# return self.pgp.send_keys(server, kid)
def email2fp(self, email):
kdict = self.pgp.list_keys()
for k in kdict:
if email in str(k['uids'][0]):
print k['uids'][0]
return str(k['fingerprint'])
def delete_key_fp(self, fp):
dsk = self.pgp.delete_keys(fp, True)
dpk = self.pgp.delete_keys(fp)
return 'Deleted %s %s' % (str(dsk), str(dpk))
def delete_key(self, email):
fp = self.email2fp(email)
return self.delete_key_fp(fp)
def delete_key_pub(self, email):
fp = self.email2fp(email)
if fp is None:
print 'No key to delete for %s' % email
else:
dpk = self.pgp.delete_keys(fp)
return 'Deleted %s' % dpk
def get_priv_keys(self):
kdict = self.pgp.list_keys(True)
priv_key_fps= []
for k in kdict:
priv_key_fps.append(k['fingerprint'])
return priv_key_fps
def get_pub_keys(self):
kdict = self.pgp.list_keys()
pub_key_fps= []
for k in kdict:
pub_key_fps.append(k['fingerprint'])
return pub_key_fps
def reset_database(self):
print 'DELETING all pub/priv keys'
priv_key_fps = self.get_priv_keys()
for k in priv_key_fps:
self.delete_key_fp(k)
def run_server(self, server_ip, server_port, max_connections=5):
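        # Wire protocol as implemented below: every request carries 'mode'
        # and 'data'. mode 0 = encrypt (two follow-up messages supply the
        # recipient email and an optional public-key blob), mode 1 =
        # decrypt, mode 3 = verify a signature, mode 4 = sign, mode 6 =
        # hand our public key to the client for upload.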
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((server_ip, server_port))
serversocket.listen(max_connections) # become a server socket, maximum 5 connections
connection, address = serversocket.accept()
while True:
            print 'Waiting for incoming requests...'
buf = recv_one_message(connection)
mode = buf['mode']
data = buf['data']
if len(data) > 0:
if mode == 0:
print 'Processing encryption request...'
# print data
buf = recv_one_message(connection)
recipient_email = buf['data']
# recipient_email = re.findall(r'(?<=To:)*[^@]+@[^@]+\.[^@]+(?=>)', data)[0]
                    print 'Searching for pub of %s ...' % recipient_email
recipient_fp = self.local_search(recipient_email)
buf = recv_one_message(connection)
key_data = buf['data']
if recipient_fp is None:
print 'Key downloaded. Importing locally.'
self.pgp.import_keys(key_data)
recipient_fp = self.local_search(recipient_email)
if recipient_fp is not None:
print 'Encrypting using pub of %s ...' % recipient_email
# Signature is not currently supported
# phrase = getpass.getpass('Passphrase:')
# enc = self.encrypt_sign_str(data, recipient_fp, sign_key_id=self.fingerprint, sign_passphase=phrase, alwaystrust=True)
enc = self.encrypt_sign_str(data, recipient_fp, alwaystrust=True)
print enc
send_one_resp(connection, str(enc))
else:
print 'Pub not found for %s' % recipient_email
send_one_resp(connection, 'Key not found')
elif mode == 1:
print data
print 'Decrypting using priv of %s ...' % self.email
phrase = getpass.getpass('Passphrase:')
enc = self.decrypt_str(data, phrase)
print enc
send_one_resp(connection, str(enc))
elif mode == 3:
print data
# VERIFY SIGN
                    # print 'Verifying message signature using key with fingerprint %s ...' % data.fingerprint
vrf = self.vrf_str(data)
send_one_resp(connection, str(vrf))
elif mode == 4:
print data
# SIGN
print 'Signing message using priv of %s ...' % self.email
phrase = getpass.getpass('Passphrase:')
sgn = self.sign_str(data, phrase)
send_one_resp(connection, str(sgn))
elif mode == 6:
if self.key_not_uploaded:
key_data = self.pgp.export_keys(self.fingerprint)
send_one_upload_key(connection, key_data)
print 'Sending pub to client to be uploaded'
else:
send_one_upload_key(connection, 'Uploaded')
if __name__ == '__main__':
import sys
import pgp_wrapper as g
import argparse
parser = argparse.ArgumentParser(description='DonPGP!')
parser.add_argument('--server-ip', help='IP to run DonPGP', required=True)
parser.add_argument('--server-port', help='port to run DonPGP', required=True)
parser.add_argument('--email', help='Email to generate key pair', required=True)
parser.add_argument('--passphrase', help='Secret to unlock private key. Only needed to generate a new key_pair.', default=None, required=False)
    parser.add_argument('--verbose', help='Enable verbose GnuPG output.', action='store_true')
args = parser.parse_args()
print args
p = g.PGP('keys',
args.email,
verbose=args.verbose,
pass_phrase=args.passphrase)
p.run_server(args.server_ip, int(args.server_port))
| gpl-3.0 | 6,337,142,791,805,982,000 | 38.053333 | 147 | 0.528167 | false |
sontek/rethinkdb | external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-ordering.py | 225 | 3058 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the link order of object files is the same between msvs and ninja.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('link-ordering.gyp', chdir=CHDIR)
test.build('link-ordering.gyp', test.ALL, chdir=CHDIR)
def GetDisasm(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
# Get disassembly and drop int3 padding between functions.
return '\n'.join(
x for x in test.run_dumpbin('/disasm', full_path).splitlines()
if 'CC' not in x)
# This is the full dump that we expect. The source files in the .gyp match
# this order which is what determines the ordering in the binary.
expected_disasm_basic = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?z@@YAHXZ:
00401010: B8 03 00 00 00 mov eax,3
00401015: C3 ret
?x@@YAHXZ:
00401020: B8 01 00 00 00 mov eax,1
00401025: C3 ret
?y@@YAHXZ:
00401030: B8 02 00 00 00 mov eax,2
00401035: C3 ret
_main:
00401040: 33 C0 xor eax,eax
00401042: C3 ret
'''
if expected_disasm_basic not in GetDisasm('test_ordering_exe.exe'):
print GetDisasm('test_ordering_exe.exe')
test.fail_test()
# Similar to above. The VS generator handles subdirectories differently.
expected_disasm_subdirs = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
_main:
00401010: 33 C0 xor eax,eax
00401012: C3 ret
?y@@YAHXZ:
00401020: B8 02 00 00 00 mov eax,2
00401025: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
'''
if expected_disasm_subdirs not in GetDisasm('test_ordering_subdirs.exe'):
print GetDisasm('test_ordering_subdirs.exe')
test.fail_test()
# Similar, but with directories mixed into folders (crt and main at the same
# level, but with a subdir in the middle).
expected_disasm_subdirs_mixed = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?x@@YAHXZ:
00401010: B8 01 00 00 00 mov eax,1
00401015: C3 ret
_main:
00401020: 33 C0 xor eax,eax
00401022: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
?y@@YAHXZ:
00401040: B8 02 00 00 00 mov eax,2
00401045: C3 ret
'''
if (expected_disasm_subdirs_mixed not in
GetDisasm('test_ordering_subdirs_mixed.exe')):
print GetDisasm('test_ordering_subdirs_mixed.exe')
test.fail_test()
test.pass_test()
| agpl-3.0 | 59,218,725,993,339,760 | 29.277228 | 78 | 0.59189 | false |
40223133/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/sre_compile.py | 630 | 16898 | #
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import sys
import _sre
import sre_parse
from sre_constants import *
from _sre import MAXREPEAT
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFF
def _identityfunction(x):
return x
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
for op, av in pattern:
#print('sre_compile.py:_compile:42', op, av)
#print('sre_compile.py:_compile:42', code)
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
emit(_sre.getlower(av, flags))
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = _identityfunction
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error("internal: unsupported template operator")
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error("look-behind requires fixed-width pattern")
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
            for tail_skip in tail:
                code[tail_skip] = _len(code) - tail_skip
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None):
# compile charset subprogram
emit = code.append
if fixup is None:
fixup = _identityfunction
for op, av in _optimize_charset(charset, fixup):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(fixup(av))
elif op is RANGE:
emit(fixup(av[0]))
emit(fixup(av[1]))
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error("internal: unsupported set operator")
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup):
# internal: optimize character set
out = []
outappend = out.append
charmap = [0]*256
try:
for op, av in charset:
if op is NEGATE:
outappend((op, av))
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could append to charmap tail
return charset # cannot compress
except IndexError:
# character set contains unicode characters
return _optimize_unicode(charset, fixup)
# compress character map
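    # For example (illustrative): a charmap with bits set only for 65-90
    # plus 97 yields runs = [(65, 26), (97, 1)], emitted below as a RANGE
    # and a LITERAL instead of a full CHARSET bitmap.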
i = p = n = 0
runs = []
runsappend = runs.append
for c in charmap:
if c:
if n == 0:
p = i
n = n + 1
elif n:
runsappend((p, n))
n = 0
i = i + 1
if n:
runsappend((p, n))
if len(runs) <= 2:
# use literal/range
for p, n in runs:
if n == 1:
outappend((LITERAL, p))
else:
outappend((RANGE, (p, p+n-1)))
if len(out) < len(charset):
return out
else:
# use bitmap
data = _mk_bitmap(charmap)
outappend((CHARSET, data))
return out
return charset
def _mk_bitmap(bits):
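    # Packs a sequence of 0/1 flags, least-significant bit first, into
    # MAXCODE-sized words. For example (illustrative), leading bits
    # 0,1,0,1 contribute 0b1010 == 10 to the first emitted word.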
data = []
dataappend = data.append
    # Both branches were identical (a Python 2 leftover in which the else
    # branch used longs), so the CODESIZE check is unnecessary here.
    start = (1, 0)
m, v = start
for c in bits:
if c:
v = v + m
m = m + m
if m > MAXCODE:
dataappend(v)
m, v = start
return data
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 16-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (128 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of chunks (16 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of UTF-16 has not yet been developed. This means,
# in particular, that negated charsets cannot be represented as
# bigcharsets.
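#
# A small worked example (illustrative only): a charset matching exactly
# the 256 characters U+0400-U+04FF produces a 65536-bit bitmap with one
# all-ones 256-character chunk and one all-zeros chunk, so the compressed
# form is roughly
#
#   header  = [2]                      # number of distinct chunks
#   mapping = [0]*4 + [1] + [0]*251    # chunk index per high byte
#   data    = bitmap(chunk 0) + bitmap(chunk 1)
#
# and matching U+0416 reads mapping[0x04] -> chunk 1, then tests bit
# 0x16 within that chunk.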
def _optimize_unicode(charset, fixup):
try:
import array
except ImportError:
return charset
charmap = [0]*65536
negate = 0
try:
for op, av in charset:
if op is NEGATE:
negate = 1
elif op is LITERAL:
charmap[fixup(av)] = 1
elif op is RANGE:
for i in range(fixup(av[0]), fixup(av[1])+1):
charmap[i] = 1
elif op is CATEGORY:
# XXX: could expand category
return charset # cannot compress
except IndexError:
# non-BMP characters; XXX now they should work
return charset
if negate:
if sys.maxunicode != 65535:
# XXX: negation does not work with big charsets
# XXX2: now they should work, but removing this will make the
# charmap 17 times bigger
return charset
for i in range(65536):
charmap[i] = not charmap[i]
comps = {}
mapping = [0]*256
block = 0
data = []
for i in range(256):
chunk = tuple(charmap[i*256:(i+1)*256])
new = comps.setdefault(chunk, block)
mapping[i] = new
if new == block:
block = block + 1
data = data + _mk_bitmap(chunk)
header = [block]
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
# Convert block indices to byte array of 256 bytes
mapping = array.array('b', mapping).tobytes()
# Convert byte array to word array
mapping = array.array(code, mapping)
assert mapping.itemsize == _sre.CODESIZE
assert len(mapping) * mapping.itemsize == 256
header = header + mapping.tolist()
data[0:0] = header
return [(BIGCHARSET, data)]
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
if lo == 0 and hi == MAXREPEAT:
raise error("nothing to repeat")
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
#print('sre_compile.py:_compile_info:370', lo, hi)
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
#print('sre_compile.py:_code:381',op,av)
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
#print('sre_compile.py:_code:430', code)
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
#print('sre_compile.py:_code:457', code)
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
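        # This is the classic KMP failure function over the literal
        # prefix. For example (illustrative), prefix "abab" produces
        # table = [-1, 0, 0, 1, 2]; the emitted table[1:] = [0, 0, 1, 2]
        # tells the engine how much of the prefix is still matched after
        # a mismatch at each position.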
table = [-1] + ([0]*len(prefix))
for i in range(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
def isstring(obj):
return isinstance(obj, (str, bytes))
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
#print("sre_compile.py:compile:504:p", p)
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
#print('sre_compile.py:498:p', p)
code = _code(p, flags)
#print('sre_compile.py:501:code', code)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
| gpl-3.0 | 5,504,392,161,521,260,000 | 30.703565 | 75 | 0.512013 | false |
pacificclimate/pdp_util | tests/test_util.py | 1 | 3182 | from urllib.parse import urlencode
from datetime import datetime
from pycds import Network, CrmpNetworkGeoserver as cng
from pdp_util.util import get_stn_list, get_clip_dates, get_extension
import pytest
from sqlalchemy import text
from webob.request import Request
def test_get_stn_list(test_session):
stns = get_stn_list(test_session, [])
assert len(stns) == 50
@pytest.mark.parametrize(
("constraints", "to_select", "expected"),
[
(
[cng.network_name == "EC_raw"],
cng.native_id,
["1046332", "1126150", "1106200"],
),
(
[
cng.max_obs_time > datetime(2000, 1, 1),
cng.min_obs_time < datetime(2000, 1, 31),
],
cng.station_id,
[
613,
813,
913,
1113,
1413,
1613,
2113,
2773,
1313,
113,
713,
1213,
2673,
2613,
413,
],
),
(
[cng.min_obs_time < datetime(1965, 1, 1)],
cng.station_id,
[13, 513, 613, 713, 813],
),
(
[cng.freq == "1-hourly"],
cng.network_name,
["FLNRO-WMB", "MoTIe", "EC_raw", "BCH", "ENV-AQN"],
),
],
)
def test_get_stn_list_with_filter(test_session, constraints, to_select, expected):
stns = get_stn_list(test_session, constraints, to_select)
assert set(expected) == set([x[0] for x in stns])
def test_single_column_select(test_session):
stns = get_stn_list(test_session, [], cng.station_id)
assert isinstance(stns[0][0], int)
def test_get_clip_dates():
sdate, edate = datetime(2000, 1, 1), datetime(2000, 1, 31)
params = {
"from-date": sdate.strftime("%Y/%m/%d"),
"to-date": edate.strftime("%Y/%m/%d"),
}
req = Request.blank("?" + urlencode(params))
rv = get_clip_dates(req.environ)
# If cliptodate is not set, then get_clip_dates ignores the dates
assert rv == (None, None)
params["cliptodate"] = "True"
req = Request.blank("?" + urlencode(params))
rv = get_clip_dates(req.environ)
assert rv == (sdate, edate)
# Does it work with just one of the two dates?
del params["from-date"]
req = Request.blank("?" + urlencode(params))
rv = get_clip_dates(req.environ)
assert rv == (None, edate)
def test_get_extension_good():
params = {"data-format": "html"}
req = Request.blank("?" + urlencode(params))
assert get_extension(req.environ) == "html"
def test_get_extension_bad():
params = {"data-format": "unsupported_extension"}
req = Request.blank("?" + urlencode(params))
assert get_extension(req.environ) == None
# data-format not in the request params
req = Request.blank("")
assert get_extension(req.environ) == None
def test_unpublished(test_session_with_unpublished):
sesh = test_session_with_unpublished
stns = get_stn_list(sesh, [Network.name == "MoSecret"])
assert len(stns) == 0
| gpl-3.0 | 2,471,189,183,973,981,700 | 27.159292 | 82 | 0.543997 | false |
numpy/numpy | numpy/distutils/intelccompiler.py | 5 | 4234 | import platform
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.ccompiler import simple_version_match
if platform.system() == 'Windows':
from numpy.distutils.msvc9compiler import MSVCCompiler
class IntelCCompiler(UnixCCompiler):
"""A modified Intel compiler compatible with a GCC-built Python."""
compiler_type = 'intel'
cc_exe = 'icc'
cc_args = 'fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
if platform.system() == 'Darwin':
shared_flag = '-Wl,-undefined,dynamic_lookup'
else:
shared_flag = '-shared'
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler + ' -shared-intel',
linker_so=compiler + ' ' + shared_flag +
' -shared-intel')
class IntelItaniumCCompiler(IntelCCompiler):
compiler_type = 'intele'
# On Itanium, the Intel Compiler used to be called ecc, let's search for
# it (now it's also icc, so ecc is last in the search).
for cc_exe in map(find_executable, ['icc', 'ecc']):
if cc_exe:
break
class IntelEM64TCCompiler(UnixCCompiler):
"""
A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
"""
compiler_type = 'intelem'
cc_exe = 'icc -m64'
cc_args = '-fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -std=c99 -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
if platform.system() == 'Darwin':
shared_flag = '-Wl,-undefined,dynamic_lookup'
else:
shared_flag = '-shared'
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
linker_exe=compiler + ' -shared-intel',
linker_so=compiler + ' ' + shared_flag +
' -shared-intel')
if platform.system() == 'Windows':
class IntelCCompilerW(MSVCCompiler):
"""
A modified Intel compiler compatible with an MSVC-built Python.
"""
compiler_type = 'intelw'
compiler_cxx = 'icl'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
version_match = simple_version_match(start=r'Intel\(R\).*?32,')
self.__version = version_match
def initialize(self, plat_name=None):
MSVCCompiler.initialize(self, plat_name)
self.cc = self.find_exe('icl.exe')
self.lib = self.find_exe('xilib')
self.linker = self.find_exe('xilink')
self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
'/Qstd=c99']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Qstd=c99', '/Z7', '/D_DEBUG']
class IntelEM64TCCompilerW(IntelCCompilerW):
"""
A modified Intel x86_64 compiler compatible with
a 64bit MSVC-built Python.
"""
compiler_type = 'intelemw'
def __init__(self, verbose=0, dry_run=0, force=0):
MSVCCompiler.__init__(self, verbose, dry_run, force)
version_match = simple_version_match(start=r'Intel\(R\).*?64,')
self.__version = version_match
| bsd-3-clause | 2,835,375,531,588,375,600 | 37.144144 | 78 | 0.541096 | false |
feliperfranca/django-nonrel-example | django/contrib/admindocs/utils.py | 314 | 3796 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.Parser import HeaderParser
from email.Errors import HeaderParseError
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_str
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
        'initial_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : reverse('django-admindocs-docroot').rstrip('/')
}
if thing_being_parsed:
thing_being_parsed = smart_str("<%s>" % thing_being_parsed)
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return mark_safe(parts['fragment'])
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
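# For example (illustrative), :model:`auth.User` in a docstring becomes a
# reference node linking to <link_base>/models/auth.user/ once the roles
# below are registered.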
def create_reference_role(rolename, urlbase):
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
| bsd-3-clause | -3,556,754,438,577,526,300 | 35.152381 | 140 | 0.643572 | false |
381426068/MissionPlanner | Lib/site-packages/numpy/core/tests/test_scalarmath.py | 53 | 14311 | import sys
from numpy.testing import *
import numpy as np
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
real_types = [ np.byte, np.ubyte, np.short, np.ushort,
np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble ]
complex_types = [ np.csingle, np.cdouble, np.clongdouble ]
# This compares scalarmath against ufuncs.
class TestTypes(TestCase):
def test_types(self, level=1):
for atype in types:
a = atype(1)
assert a == 1, "error with %r: got %r" % (atype,a)
def test_type_add(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
for l, btype in enumerate(types):
valb = btype(1)
val2 = np.array([1],dtype=btype)
val = vala + valb
valo = val1 + val2
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
def test_type_subtract(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
for l, btype in enumerate(types):
valb = btype(1)
val2 = np.array([1],dtype=btype)
val = vala - valb
valo = val1 - val2
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
def test_type_multiply(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
for l, btype in enumerate(types):
valb = btype(1)
val2 = np.array([1],dtype=btype)
val = vala * valb
valo = val1 * val2
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
def test_type_divide(self, level=1):
# Choose more interesting operands for this operation.
# list of types
for k, atype in enumerate(types):
vala = atype(6)
val1 = np.array([6],dtype=atype)
for l, btype in enumerate(types):
valb = btype(2)
val2 = np.array([2],dtype=btype)
val = vala / valb
valo = val1 / val2
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
def test_type_remainder(self, level=1):
# Choose more interesting operands for this operation.
# list of types
for k, atype in enumerate(types):
vala = atype(6)
val1 = np.array([6],dtype=atype)
for l, btype in enumerate(types):
valb = btype(2)
val2 = np.array([2],dtype=btype)
try:
val = vala % valb
valo = val1 % val2
if hasattr(val, "dtype") and hasattr(valo, "dtype"):
# Invalid operands in IronPython don't have dtype attributes.
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
except TypeError, e:
# Some combos just don't work, like byte % complex. We
# just don't worry about classifying the cases here, and
# instead just ignore these types of problems. <grin>
pass
def test_type_negative(self, level=1):
# Uhh, "negate" ??? Or maybe "unary minus".
# But shouldn't this fail for unsigned types? Hmmm...
# list of types
# NOTE: unary operators don't require the double loop over types,
# since there's only one operand.
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
val = -vala
valo = -val1
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d)" % (k)
def test_type_positive(self, level=1):
# Otherwise known as "unary plus".
# list of types
# NOTE: unary operators don't require the double loop over types,
# since there's only one operand.
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
val = +vala
valo = +val1
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d)" % (k)
def test_type_power(self, level=1):
# Choose more interesting operands for this operation.
# list of types
for k, atype in enumerate(types):
vala = atype(2)
val1 = np.array([2],dtype=atype)
# Skip the boolean types
if vala.dtype.char == '?': continue
for l, btype in enumerate(types):
valb = btype(3)
val2 = np.array([3],dtype=btype)
# Skip the boolean types
if valb.dtype.char == '?': continue
val = vala ** valb
valo = val1 ** val2
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d,%d)" % (k,l)
def test_type_absolute(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(-3)
val1 = np.array([-3],dtype=atype)
val = abs(vala)
valo = abs(val1)
assert val.dtype.num == valo.dtype.num and \
val.dtype.char == valo.dtype.char, \
"error with (%d)" % (k)
# I guess we can't really test for the right result here, unless
# we can figure out how to exclude the unsigned types.
#assert val == atype(3) and valo == atype(3), \
# "error taking absolute value (%d)." % k
def test_type_hex(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
try:
val = hex(vala)
valo = hex(val1)
except:
#print "Can't hexify ", k
pass
#assert val.dtype.num == valo.dtype.num and \
# val.dtype.char == valo.dtype.char, \
# "error with (%d)" % (k)
# We can't demand equivalent repr's either.
#assert val == valo, "Trouble with hex (%d)" % k
# So there's not really so much we can check here, beyond simply
# that the code executes without throwing exceptions.
def test_type_oct(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
try:
val = oct(vala)
valo = oct(val1)
except:
#print "Can't hexify ", k
pass
#assert val.dtype.num == valo.dtype.num and \
# val.dtype.char == valo.dtype.char, \
# "error with (%d)" % (k)
# We can't demand equivalent repr's either.
#assert val == valo, "Trouble with hex (%d)" % k
# So there's not really so much we can check here, beyond simply
# that the code executes without throwing exceptions.
def test_type_float(self, level=1):
# list of types
for k, atype in enumerate(types):
vala = atype(3)
val1 = np.array([3],dtype=atype)
try:
val = float(vala)
valo = float(val1)
except TypeError, e:
# The complex type, for example, can't be cast to float, so
# just skip it.
continue
assert val == valo, "Trouble with float (%d)" % k
# Skip over bool.
if vala.dtype.char == '?': continue
assert val == 3 and valo == 3, "Trouble with float (%d)" % k
def test_gentype_nonzero( self ):
# This exercises gentype_nonzero, and thus may point the path to
# executing other gentype_* functions.
z = np.clongdouble( 4 + 5j )
r = np.nonzero( z )
@dec.skipif(sys.platform == 'cli',
"Construction of arrays by passing sequences to scalar constructors is not supported on IronPython")
def test_type_create(self, level=1):
for k, atype in enumerate(types):
a = np.array([1,2,3],atype)
b = atype([1,2,3])
assert_equal(a,b)
def test_scalartype_ops( self ):
int_types = [ np.byte, np.ubyte, np.short, np.ushort,
np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong ]
for t in int_types:
x = t(7)
y = x ^ x
assert y == 0, "xor on scalartype"
x = t(1)
y = x << 1
assert y == 2, "left shift on scalartype"
# NOTE: y came back not the same type as t, so a right shift on
# y doesn't exercise the <t>_rshift function. To get the
# <t>_rshift, we have to go back to a <t> instance.
y = t(2)
z = y >> 1
assert z == 1, "right shift on scalartype"
assert np.invert(x) != 1, "invert on scalartype"
assert np.invert( np.invert( x ) ) == x, "invert on scalartype"
y = t(0)
z = x & y
assert z == 0, "bitwise and on scalartype"
z = x | y
assert z == 1, "bitwise or on scalartype"
assert z, "nonzero on scalartype"
x = t(0)
assert ~x, "Invert on numpy scalar types"
#x = t(5)
#y = x // 2
#assert y, "nonzero on numpy scalar types"
#assert y == 2, "floor divide on numpy scalar types"
for t in real_types:
x = t(5)
assert x, "nonzero on scalartype"
y = x // 2.
assert y == 2, "floor divide on scalartype"
y = t(2)
n,r = divmod( x, y )
assert n == t(2), "divmod on scalartype"
assert r == t(1), "divmod on scalartype"
for t in complex_types:
x = t(5)
assert x, "nonzero on complex scalartype"
y = x // 2
assert y == 2, "Floor divide on complex scalartype"
from operator import itruediv
itruediv( z, x )
for t in types[1:]:
z = t(5)
x = t(2)
itruediv( z, x )
x = t(5)
y = np.long(x)
assert y == x, "Cast scalartype to long"
y = np.int(x)
assert y == x, "Cast scalartype to int"
class TestPower(TestCase):
def test_small_types(self):
for t in [np.int8, np.int16]:
a = t(3)
b = a ** 4
assert b == 81, "error with %r: got %r" % (t,b)
def test_large_types(self):
for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
a = t(51)
b = a ** 4
msg = "error with %r: got %r" % (t,b)
if np.issubdtype(t, np.integer):
assert b == 6765201, msg
else:
assert_almost_equal(b, 6765201, err_msg=msg)
class TestConversion(TestCase):
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
for T in [None, np.float64, np.int64]:
a = np.array(l,dtype=T)
assert_equal(map(int,a), li)
a = np.array(l[:3], dtype=np.uint64)
assert_equal(map(int,a), li[:3])
#class TestRepr(TestCase):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
# val_repr = repr(val)
# val2 = eval(val_repr)
# assert_equal( val, val2 )
class TestRepr(TestCase):
def _test_type_repr(self, t):
finfo=np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm','small norm']:
# Values from http://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes,dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7-(last_fraction_bit_idx % 8)
constr[byte] = 1<<bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7-(last_exponent_bit_idx % 8)
constr[byte] = 1<<bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
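        # Worked illustration (values assumed from IEEE 754 single
        # precision, not computed here): for np.float32, finfo reports
        # nexp=8 and nmant=23, so 'small denorm' flips the last fraction
        # bit (~1.4e-45) and 'small norm' flips the last exponent bit
        # (~1.18e-38); both should survive the repr()/eval() round trip
        # checked above.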
def test_float_repr(self):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
            yield self._test_type_repr, t
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | 3,984,565,927,031,687,000 | 33.484337 | 116 | 0.488086 | false |
da2ce7/cjdns | node_build/dependencies/libuv/build/gyp/test/lib/TestCmd.py | 330 | 52544 | """
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
             stdin = 'input to feed to the program\n',
             universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
# Copyright 2000-2010 Steven Knight
# This module is free software, and you may redistribute it and/or modify
# it under the same terms as Python itself, so long as this copyright message
# and disclaimer are retained in their original form.
#
# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile('\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
# TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
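# Illustration (hypothetical directory): Collector is meant to be passed
# as the visit function of os.path.walk() to flatten a tree into a list:
#
#   col = Collector('/tmp/work')
#   os.path.walk('/tmp/work', col, None)
#   # col.entries now holds '/tmp/work' plus every entry beneath it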
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
        output like the simple, unadorned 'diff' command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
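    # Illustrative call (not part of the original module): comparing
    # ['a', 'b', 'c'] against ['a', 'x', 'c'] yields the classic
    # ed-style hunk:
    #
    #   2c2
    #   < b
    #   ---
    #   > x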
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
        you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
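# Illustration (assumed inputs): diff_re(['a.*'], ['abc']) returns []
# because the regex matches, while diff_re(['x'], ['y']) returns
# ["1c1", "< 'x'", '---', "> 'y'"].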
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
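# Illustrative lookups (hypothetical paths): on POSIX, where_is('python')
# might return '/usr/bin/python'; on win32 the PATHEXT extensions are
# also tried, so where_is('python') can resolve to something like
# 'C:\\Python\\python.exe'.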
try:
import subprocess
except ImportError:
# The subprocess module doesn't exist in this version of Python,
# so we're going to cobble up something that looks just enough
# like its API for our purposes below.
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
# A cribbed Popen4 class, with some retrofitted code from
# the Python 1.5 Popen3 class methods to do certain things
# by hand.
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
# Child
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
# Shouldn't come here, I guess
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
# From Josiah Carlson,
# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
#if self.universal_newlines:
# read = self._translate_newlines(read)
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE: #broken pipe
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
#if self.universal_newlines:
# r = self._translate_newlines(r)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
# TODO(3.0): rewrite to use memoryview()
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
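# Illustrative non-blocking round trip (hypothetical command; assumes a
# program that echoes its input back):
#
#   p = Popen(['cat'], stdin=PIPE, stdout=PIPE)
#   send_all(p, 'hello\n')
#   reply = recv_some(p)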
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
#self.diff_function = difflib.context_diff
#self.diff_function = difflib.unified_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
        if os.environ.get('PRESERVE', '') != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
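        # Illustration: escape('a b') returns '"a b"' and escape('pa$s')
        # returns 'pa\$s', so the argument survives one level of POSIX
        # shell interpretation.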
else:
# Windows does not allow special characters in file names
# anyway, so no need for an escape function, we will just quote
# the arg.
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
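    # Illustration (hypothetical values): with self._cwd == '/work',
    # command_args('foo.py', 'python', '-v --trace') returns
    # ['python', '/work/foo.py', '-v', '--trace'].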
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
        if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
# On Windows, if we make stdin a pipe when we plan to send
# no input, and the test program exits before
# Popen calls msvcrt.open_osfhandle, that call will fail.
# So don't use a pipe for stdin if we don't need one.
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if not p.stderr is None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
# Symlinks in the path will report things
# differently from os.getcwd(), so chdir there
# and back to fetch the canonical path.
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
# Uppercase the drive letter since the case of drive
# letters is pretty much random on win32:
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
#
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
        if path is not None:
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif read:
# It's a directory and we're trying to turn on read
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# read permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off read
            # permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling read permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
# If it's a file, that's easy, just chmod it.
do_chmod(top)
elif execute:
# It's a directory and we're trying to turn on execute
# permission, so it's also pretty easy, just chmod the
# directory and then chmod every entry on our walk down the
# tree. Because os.path.walk() is top-down, we'll enable
# execute permission on any directories that have it disabled
# before os.path.walk() tries to list their contents.
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
# It's a directory and we're trying to turn off execute
# permission, which means we have to chmod the directories
# in the tree bottom-up, lest disabling execute permission from
# the top down get in the way of being able to get at lower
# parts of the tree. But os.path.walk() visits things top
# down, so we just use an object to collect a list of all
# of the entries in the tree, reverse the list, and then
# chmod the reversed (bottom-up) list.
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | -8,915,685,874,621,644,000 | 31.901691 | 101 | 0.551005 | false |
nwjs/chromium.src | ios/build/bots/scripts/run_test.py | 1 | 5516 | #!/usr/bin/python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for run.py."""
import json
import re
import unittest
import run
class UnitTest(unittest.TestCase):
def test_parse_args_ok(self):
cmd = [
'--app',
'./foo-Runner.app',
'--host-app',
'./bar.app',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
runner = run.Runner()
runner.parse_args(cmd)
self.assertTrue(runner.args.app == './foo-Runner.app')
def test_parse_args_iossim_platform_version(self):
"""
    iossim, platform and version should all be set; each test case
    below omits exactly one of them.
"""
test_cases = [
{
'error':
2,
'cmd': [
'--platform',
'iPhone X',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
{
'error':
2,
'cmd': [
'--iossim',
'path/to/iossim',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
{
'error':
2,
'cmd': [
'--iossim',
'path/to/iossim',
'--platform',
'iPhone X',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
],
},
]
runner = run.Runner()
for test_case in test_cases:
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(test_case['cmd'])
self.assertTrue(re.match('must specify all or none of *', ctx.message))
self.assertEqual(ctx.exception.code, test_case['error'])
def test_parse_args_xcode_parallelization_requirements(self):
"""
    Setting xcode parallelization requires both platform and version.
"""
test_cases = [
{
'error':
2,
'cmd': [
'--xcode-parallelization',
'--platform',
'iPhone X',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
},
{
'error':
2,
'cmd': [
'--xcode-parallelization',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
}
]
runner = run.Runner()
for test_case in test_cases:
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(test_case['cmd'])
self.assertTrue(
re.match('--xcode-parallelization also requires both *',
ctx.message))
self.assertEqual(ctx.exception.code, test_case['error'])
def test_parse_args_xcodebuild_device_runner_requirements(self):
"""
    xcodebuild_device_runner requires both platform and version.
"""
test_cases = [
{
'error':
2,
'cmd': [
'--xcodebuild-device-runner',
'--platform',
'iPhone X',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
},
{
'error':
2,
'cmd': [
'--xcodebuild-device-runner',
'--version',
'13.2.2',
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
}
]
runner = run.Runner()
for test_case in test_cases:
with self.assertRaises(SystemExit) as ctx:
runner.parse_args(test_case['cmd'])
self.assertTrue(
re.match('--xcodebuild-device-runner also requires '
'both *', ctx.message))
self.assertEqual(ctx.exception.code, test_case['error'])
def test_parse_args_from_json(self):
json_args = {
'test_cases': ['test1'],
'restart': 'true',
'xcode_parallelization': True,
'shards': 2
}
cmd = [
'--shards',
'1',
'--platform',
'iPhone X',
'--version',
'13.2.2',
'--args-json',
json.dumps(json_args),
# Required
'--xcode-build-version',
'123abc',
'--out-dir',
'some/dir'
]
# shards should be 2, since json arg takes precedence over cmd line
runner = run.Runner()
runner.parse_args(cmd)
# Empty array
self.assertEquals(len(runner.args.env_var), 0)
self.assertTrue(runner.args.xcode_parallelization)
self.assertTrue(runner.args.restart)
self.assertEquals(runner.args.shards, 2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,793,199,857,715,615,000 | 23.515556 | 79 | 0.433829 | false |
meituan/mcsapi_python | mosclient/common/client.py | 1 | 4879 | import sys
import json
import urllib
import urllib2
from urlparse import urlparse
from datetime import datetime
import ec2utils
from xmltodict import parse
class BaseClient(object):
def __init__(self, access, secret, url, format=None,
timeout=300, debug=False, region='Beijing'):
self.access = access
self.secret = secret
self.url = url
self.format = format
self.timeout = timeout
self.debug = debug
self.region = region
def _get_action(self, level):
if getattr(sys, '_getframe', None) is not None:
co = sys._getframe(level).f_code
func = getattr(self, co.co_name, None)
if func is not None and callable(func):
return co.co_name
else:
raise Exception('Cannot retrieve action name on this platform')
def get_signature(self, params):
req = urlparse(self.url)
host = req.netloc
if (req.scheme == 'http' and host.endswith(':80')) or \
(req.scheme == 'https' and host.endswith(':443')):
host = host[:host.rfind(':')]
path = req.path
if req.path == '':
path = '/'
cred_dict = {
'access': self.access,
'host': host,
'verb': 'POST',
'path': path,
'params': params,
}
signer = ec2utils.Ec2Signer(self.secret)
return signer.generate(cred_dict)
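    # Illustration (hypothetical endpoint): for url
    # 'https://api.example.com/v1', cred_dict carries host
    # 'api.example.com' and path '/v1'; Ec2Signer builds the EC2
    # signature-v2 canonical string from the verb, host, path, and
    # sorted request params, then HMAC-SHA256 signs it with the secret.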
def get_httperror(self, e, debug):
details = e.read()
if debug:
print details
try:
if 'application/xml' in e.headers.get('Content-Type', None):
from common.xmltodict import parse
details = parse(details)
else:
import json
details = json.loads(details)
if 'ErrorResponse' in details:
details = details['ErrorResponse']
if 'Error' in details:
details = details['Error']
if 'error' in details:
details = details['error']
if 'message' in details:
details = details['message']
elif 'details' in details:
details = details['details']
except:
pass
if not isinstance(details, basestring):
details = str(details)
return '%s(%d): %s' % (e.msg, e.code, details)
def _request(self, **kwargs):
params = {}
params['Action'] = self._get_action(3)
params['AWSAccessKeyId'] = self.access
params['Timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['Region'] = self.region
for k, v in kwargs.iteritems():
if isinstance(v, list):
i = 1
for vi in v:
params['%s.%d' % (k, i)] = vi
i += 1
else:
params[k] = v
if self.format:
params['Format'] = self.format
sig = self.get_signature(params)
params['Signature'] = sig
headers = {}
headers['User-Agent'] = 'python-mosclient'
data = urllib.urlencode(params)
if self.debug:
print self.url + '?' + data
req = urllib2.Request(self.url, data, headers)
try:
resp = urllib2.urlopen(req, None, self.timeout)
return resp
except urllib2.HTTPError, e:
print self.get_httperror(e, self.debug)
except Exception, e:
raise e
def raw_request(self, **kwargs):
return self._request(**kwargs)
def request(self, **kwargs):
resp = self._request(**kwargs)
if not resp:
return
body = resp.read()
if self.debug:
print resp.headers
print body
try:
if resp.headers['Content-Type'].startswith('application/json'):
body = json.loads(body)
else:
body = parse(body)
action = self._get_action(2)
return body['%sResponse' % action]
except:
return body
@classmethod
def parse_list_params(self, limit, offset, filters, kwargs):
if limit > 0:
kwargs['Limit'] = limit
if offset > 0:
kwargs['Offset'] = offset
if filters is not None:
fidx = 1
for k, vs in filters.iteritems():
kwargs['Filter.%d.Name' % fidx] = k
if not isinstance(vs, list):
vs = [vs]
vidx = 1
for v in vs:
kwargs['Filter.%d.Value.%d' % (fidx, vidx)] = v
vidx += 1
fidx += 1
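        # Illustration (assumed inputs): filters={'status': ['ready',
        # 'running']} expands into the flat wire format:
        #   Filter.1.Name = 'status'
        #   Filter.1.Value.1 = 'ready'
        #   Filter.1.Value.2 = 'running'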
| mit | 1,717,570,367,665,442,300 | 30.681818 | 82 | 0.502152 | false |
yongshengwang/builthue | desktop/core/ext-py/Paste-1.7.2/tests/test_auth/test_auth_digest.py | 10 | 2959 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from paste.auth.digest import *
from paste.wsgilib import raw_interactive
from paste.response import header_value
from paste.httpexceptions import *
from paste.httpheaders import AUTHORIZATION, WWW_AUTHENTICATE, REMOTE_USER
import os
def application(environ, start_response):
content = REMOTE_USER(environ)
start_response("200 OK",(('Content-Type', 'text/plain'),
('Content-Length', len(content))))
return content
realm = "tag:clarkevans.com,2005:testing"
def backwords(environ, realm, username):
""" dummy password hash, where user password is just reverse """
password = list(username)
password.reverse()
password = "".join(password)
return digest_password(realm, username, password)
application = AuthDigestHandler(application,realm,backwords)
application = HTTPExceptionHandler(application)
def check(username, password, path="/"):
""" perform two-stage authentication to verify login """
(status,headers,content,errors) = \
raw_interactive(application,path, accept='text/html')
assert status.startswith("401")
challenge = WWW_AUTHENTICATE(headers)
response = AUTHORIZATION(username=username, password=password,
challenge=challenge, path=path)
assert "Digest" in response and username in response
(status,headers,content,errors) = \
raw_interactive(application,path,
HTTP_AUTHORIZATION=response)
if status.startswith("200"):
return content
if status.startswith("401"):
return None
assert False, "Unexpected Status: %s" % status
def test_digest():
assert 'bing' == check("bing","gnib")
assert check("bing","bad") is None
#
# The following code uses sockets to test the functionality,
# to enable use:
#
# $ TEST_SOCKET py.test
#
if os.environ.get("TEST_SOCKET",""):
import urllib2
from paste.debug.testserver import serve
server = serve(application)
def authfetch(username,password,path="/",realm=realm):
server.accept(2)
import socket
socket.setdefaulttimeout(5)
uri = ("http://%s:%s" % server.server_address) + path
auth = urllib2.HTTPDigestAuthHandler()
auth.add_password(realm,uri,username,password)
opener = urllib2.build_opener(auth)
result = opener.open(uri)
return result.read()
def test_success():
assert "bing" == authfetch('bing','gnib')
def test_failure():
# urllib tries 5 more times before it gives up
server.accept(5)
try:
authfetch('bing','wrong')
assert False, "this should raise an exception"
except urllib2.HTTPError, e:
assert e.code == 401
def test_shutdown():
server.stop()
| apache-2.0 | 7,419,111,466,484,620,000 | 32.247191 | 74 | 0.663738 | false |
FCP-INDI/C-PAC | CPAC/pipeline/engine.py | 1 | 92736 | import os
import ast
import six
import json
import warnings
import logging
import copy
from unittest import TestCase
from CPAC.pipeline import nipype_pipeline_engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces.utility import Rename
from CPAC.utils.interfaces.function import Function
from CPAC.utils.interfaces.datasink import DataSink
from CPAC.registration.registration import transform_derivative
from CPAC.nuisance import NuisanceRegressor
from CPAC.utils.utils import read_json, create_id_string, write_output_json, \
get_last_prov_entry, ordereddict_to_dict, check_prov_for_regtool
from CPAC.utils.datasource import (
create_anat_datasource,
create_func_datasource,
ingress_func_metadata,
create_general_datasource,
create_check_for_s3_node,
resolve_resolution
)
from CPAC.image_utils.spatial_smoothing import spatial_smoothing
from CPAC.image_utils.statistical_transforms import z_score_standardize, \
fisher_z_score_standardize
logger = logging.getLogger('workflow')
class ResourcePool(object):
def __init__(self, rpool=None, name=None, cfg=None, pipe_list=None):
if not rpool:
self.rpool = {}
else:
self.rpool = rpool
if not pipe_list:
self.pipe_list = []
else:
self.pipe_list = pipe_list
self.name = name
if cfg:
self.cfg = cfg
self.logdir = cfg.pipeline_setup['log_directory']['path']
self.num_cpus = cfg.pipeline_setup['system_config'][
'max_cores_per_participant']
self.num_ants_cores = cfg.pipeline_setup['system_config'][
'num_ants_threads']
self.ants_interp = cfg.registration_workflows[
'functional_registration']['func_registration_to_template'][
'ANTs_pipelines']['interpolation']
self.fsl_interp = cfg.registration_workflows[
'functional_registration']['func_registration_to_template'][
'FNIRT_pipelines']['interpolation']
self.func_reg = cfg.registration_workflows[
'functional_registration']['func_registration_to_template'][
'run']
self.run_smoothing = 'smoothed' in cfg.post_processing[
'spatial_smoothing']['output']
self.run_zscoring = 'z-scored' in cfg.post_processing[
'z-scoring']['output']
self.fwhm = cfg.post_processing['spatial_smoothing']['fwhm']
self.smooth_opts = cfg.post_processing['spatial_smoothing'][
'smoothing_method']
self.xfm = ['alff', 'falff', 'reho']
self.smooth = ['alff', 'falff', 'reho',
'space-template_alff',
'space-template_falff',
'space-template_reho',
'degree-centrality',
'eigen-centrality',
'lfcd']
self.zscore = self.smooth + ['alff', 'falff', 'reho',
'desc-sm_alff',
'desc-sm_falff',
'desc-sm_reho',
'space-template_alff',
'space-template_falff',
'space-template_reho',
'space-template_degree-centrality',
'space-template_eigen-centrality',
                                     'space-template_lfcd',
                                     'space-template_desc-sm_alff',
'space-template_desc-sm_falff',
'space-template_desc-sm_reho',
'space-template_desc-sm_degree-centrality',
'space-template_desc-sm_eigen-centrality',
'space-template_desc-sm_lfcd']
self.fisher_zscore = ['desc-MeanSCA_correlations']
def append_name(self, name):
self.name.append(name)
def get_name(self):
return self.name
def check_rpool(self, resource):
if resource not in self.rpool:
return False
return True
def get_pipe_number(self, pipe_idx):
return self.pipe_list.index(pipe_idx)
def get_entire_rpool(self):
return self.rpool
def get_resources(self):
return self.rpool.keys()
def copy_rpool(self):
return ResourcePool(rpool=copy.deepcopy(self.get_entire_rpool()),
name=self.name,
cfg=self.cfg,
pipe_list=copy.deepcopy(self.pipe_list))
def get_raw_label(self, resource):
# remove desc-* label
for tag in resource.split('_'):
if 'desc-' in tag:
resource = resource.replace(f'{tag}_', '')
break
return resource
def get_strat_info(self, prov, label=None, logdir=None):
strat_info = {}
for entry in prov:
if isinstance(entry, list):
strat_info[entry[-1].split(':')[0]] = entry
elif isinstance(entry, str):
strat_info[entry.split(':')[0]] = entry.split(':')[1]
if label:
if not logdir:
logdir = self.logdir
print(f'\n\nPrinting out strategy info for {label} in {logdir}\n')
write_output_json(strat_info, f'{label}_strat_info',
indent=4, basedir=logdir)
def set_json_info(self, resource, pipe_idx, key, val):
        #TODO: actually should probably be able to initialize resource/pipe_idx
if pipe_idx not in self.rpool[resource]:
raise Exception('\n[!] DEV: The pipeline/strat ID does not exist '
f'in the resource pool.\nResource: {resource}'
f'Pipe idx: {pipe_idx}\nKey: {key}\nVal: {val}\n')
else:
if 'json' not in self.rpool[resource][pipe_idx]:
self.rpool[resource][pipe_idx]['json'] = {}
self.rpool[resource][pipe_idx]['json'][key] = val
def get_json_info(self, resource, pipe_idx, key):
#TODO: key checks
        return self.rpool[resource][pipe_idx]['json'][key]
def get_resource_from_prov(self, prov):
# each resource (i.e. "desc-cleaned_bold" AKA nuisance-regressed BOLD
# data) has its own provenance list. the name of the resource, and
# the node that produced it, is always the last item in the provenance
# list, with the two separated by a colon :
if isinstance(prov[-1], list):
return prov[-1][-1].split(':')[0]
elif isinstance(prov[-1], str):
return prov[-1].split(':')[0]
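        # Illustration (hypothetical provenance list):
        #   prov = ['T1w:anat_ingress', 'desc-preproc_T1w:anatomical_init']
        #   -> returns 'desc-preproc_T1w'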
def set_data(self, resource, node, output, json_info, pipe_idx, node_name,
fork=False, inject=False):
'''
pipe_idx, node_name = new_id
if f';{resource}:' not in pipe_idx:
pipe_idx = f'{pipe_idx};{resource}:' # <--- doing this up here, now, because the del self.rpool[resource][pipe_idx] below should only get deleted for the same input/output tag!
if resource not in self.rpool.keys():
self.rpool[resource] = {}
else:
if not fork: # <--- in the event of multiple strategies/options, this will run for every option; just keep in mind
if pipe_idx in self.rpool[resource].keys(): # <--- in case the resource name is now new, and not the original
del self.rpool[resource][pipe_idx] # <--- remove old keys so we don't end up with a new strat for every new node unit (unless we fork)
if pipe_idx[-1] == ';' or pipe_idx[-1] == ':': # <--- if the ':', this kicks off when the pipe_idx is only something like 'T1w:', at the beginning
new_name = node_name # but how do we manage new threads, mid-pipeline?
else:
new_name = f',{node_name}'
new_pipe_idx = f'{pipe_idx}{new_name}'
'''
json_info = json_info.copy()
cpac_prov = []
if 'CpacProvenance' in json_info:
cpac_prov = json_info['CpacProvenance']
new_prov_list = list(cpac_prov) # <---- making a copy, it was already a list
if not inject:
new_prov_list.append(f'{resource}:{node_name}')
res, new_pipe_idx = self.generate_prov_string(new_prov_list)
if not json_info:
json_info = {'RawSources': [resource]} # <---- this will be repopulated to the full file path at the end of the pipeline building, in gather_pipes()
json_info['CpacProvenance'] = new_prov_list
if resource not in self.rpool.keys():
self.rpool[resource] = {}
else:
if not fork: # <--- in the event of multiple strategies/options, this will run for every option; just keep in mind
if pipe_idx in self.rpool[resource].keys(): # <--- in case the resource name is now new, and not the original
del self.rpool[resource][pipe_idx] # <--- remove old keys so we don't end up with a new strat for every new node unit (unless we fork)
if new_pipe_idx not in self.rpool[resource]:
self.rpool[resource][new_pipe_idx] = {}
if new_pipe_idx not in self.pipe_list:
self.pipe_list.append(new_pipe_idx)
self.rpool[resource][new_pipe_idx]['data'] = (node, output)
self.rpool[resource][new_pipe_idx]['json'] = json_info
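    # illustrative sketch of what set_data leaves in the pool (shape inferred
    # from the code above; the labels and node names are made up):
    #   self.rpool['desc-preproc_T1w'] = {
    #       "['T1w:anat_ingress', 'desc-preproc_T1w:anatomical_init']": {
    #           'data': (node, 'outputspec.anat'),
    #           'json': {'CpacProvenance': [...], ...}
    #       }
    #   }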
def get(self, resource, pipe_idx=None, report_fetched=False,
optional=False):
# NOTE!!!
# if this is the main rpool, this will return a dictionary of strats, and inside those, are dictionaries like {'data': (node, out), 'json': info}
# BUT, if this is a sub rpool (i.e. a strat_pool), this will return a one-level dictionary of {'data': (node, out), 'json': info} WITHOUT THE LEVEL OF STRAT KEYS ABOVE IT
        if isinstance(resource, list):
            # if a list of potential inputs are given, pick the first one
            # found
            for label in resource:
                if label in self.rpool.keys():
                    if report_fetched:
                        return (self.rpool[label], label)
                    return self.rpool[label]
            # fell through the loop: none of the listed labels are present
            if optional:
                if report_fetched:
                    return (None, None)
                return None
            raise Exception("\n[!] C-PAC says: None of the listed "
                            "resources are in the resource pool:\n"
                            f"{resource}\n")
else:
if resource not in self.rpool.keys():
if optional:
if report_fetched:
return (None, None)
return None
raise LookupError("\n\n[!] C-PAC says: The listed resource is "
f"not in the resource pool:\n{resource}\n\n"
"Developer Note: This may be due to a mis"
"match between the node block's docstring "
"'input' field and a strat_pool.get_data() "
"call within the block function.\n")
if report_fetched:
if pipe_idx:
return (self.rpool[resource][pipe_idx], resource)
return (self.rpool[resource], resource)
if pipe_idx:
return self.rpool[resource][pipe_idx]
return self.rpool[resource]
def get_data(self, resource, pipe_idx=None, report_fetched=False,
quick_single=False):
if quick_single:
for key, val in self.get(resource).items():
return val['data']
if report_fetched:
if pipe_idx:
connect, fetched = self.get(resource, pipe_idx=pipe_idx,
report_fetched=report_fetched)
return (connect['data'], fetched)
            connect, fetched = self.get(resource,
                                        report_fetched=report_fetched)
return (connect['data'], fetched)
if pipe_idx:
return self.get(resource, pipe_idx=pipe_idx)['data']
return self.get(resource)['data']
def copy_resource(self, resource, new_name):
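        # NOTE: this stores a reference to the same strat dictionary, not a
        # copy - changes under one label will be visible under the other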
self.rpool[new_name] = self.rpool[resource]
def get_pipe_idxs(self, resource):
return self.rpool[resource].keys()
def get_json(self, resource, strat=None):
# NOTE: resource_strat_dct has to be entered properly by the developer
# it has to either be rpool[resource][strat] or strat_pool[resource]
if strat:
resource_strat_dct = self.rpool[resource][strat]
else:
# for strat_pools mainly, where there is no 'strat' key level
resource_strat_dct = self.rpool[resource]
# TODO: the below hits the exception if you use get_cpac_provenance on
# TODO: the main rpool (i.e. if strat=None)
if 'json' in resource_strat_dct:
strat_json = resource_strat_dct['json']
else:
raise Exception('\n[!] Developer info: the JSON '
f'information for {resource} and {strat} '
f'is incomplete.\n')
return strat_json
def get_cpac_provenance(self, resource, strat=None):
# NOTE: resource_strat_dct has to be entered properly by the developer
# it has to either be rpool[resource][strat] or strat_pool[resource]
json_data = self.get_json(resource, strat)
return json_data['CpacProvenance']
def generate_prov_string(self, prov):
# this will generate a string from a SINGLE RESOURCE'S dictionary of
# MULTIPLE PRECEDING RESOURCES (or single, if just one)
# NOTE: this DOES NOT merge multiple resources!!! (i.e. for merging-strat pipe_idx generation)
if not isinstance(prov, list):
raise Exception('\n[!] Developer info: the CpacProvenance '
f'entry for {prov} has to be a list.\n')
last_entry = get_last_prov_entry(prov)
resource = last_entry.split(':')[0]
return (resource, str(prov))
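    # illustrative example for generate_prov_string (values made up):
    #   prov = ['T1w:anat_ingress', 'desc-preproc_T1w:anatomical_init']
    #   returns ('desc-preproc_T1w',
    #            "['T1w:anat_ingress', 'desc-preproc_T1w:anatomical_init']")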
def generate_prov_list(self, prov_str):
        if not isinstance(prov_str, str):
            raise Exception('\n[!] Developer info: the CpacProvenance '
                            f'entry for {prov_str} has to be a string.\n')
        return ast.literal_eval(prov_str)
def get_resource_strats_from_prov(self, prov):
# if you provide the provenance of a resource pool output, this will
# return a dictionary of all the preceding resource pool entries that
# led to that one specific output:
# {rpool entry}: {that entry's provenance}
# {rpool entry}: {that entry's provenance}
resource_strat_dct = {}
if isinstance(prov, str):
resource = prov.split(':')[0]
resource_strat_dct[resource] = prov
else:
for spot, entry in enumerate(prov):
if isinstance(entry, list):
resource = entry[-1].split(':')[0]
resource_strat_dct[resource] = entry
elif isinstance(entry, str):
resource = entry.split(':')[0]
resource_strat_dct[resource] = entry
return resource_strat_dct
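    # illustrative example for get_resource_strats_from_prov (values made up):
    #   prov = ['T1w:anat_ingress', 'desc-preproc_T1w:anatomical_init']
    #   returns {'T1w': 'T1w:anat_ingress',
    #            'desc-preproc_T1w': 'desc-preproc_T1w:anatomical_init'}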
def flatten_prov(self, prov):
if isinstance(prov, str):
return [prov]
elif isinstance(prov, list):
flat_prov = []
for entry in prov:
if isinstance(entry, list):
flat_prov += self.flatten_prov(entry)
else:
flat_prov.append(entry)
return flat_prov
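    # illustrative example for flatten_prov (values made up):
    #   [['T1w:ingress', 'mask:afni'], 'desc-preproc_T1w:init']
    #   -> ['T1w:ingress', 'mask:afni', 'desc-preproc_T1w:init']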
def get_strats(self, resources):
# TODO: NOTE: NOT COMPATIBLE WITH SUB-RPOOL/STRAT_POOLS
# TODO: (and it doesn't have to be)
import itertools
linked_resources = []
resource_list = []
for resource in resources:
# grab the linked-input tuples
if isinstance(resource, tuple):
linked = []
for label in list(resource):
rp_dct, fetched_resource = self.get(label,
report_fetched=True,
optional=True)
if not rp_dct:
continue
linked.append(fetched_resource)
resource_list += linked
if len(linked) < 2:
continue
linked_resources.append(linked)
else:
resource_list.append(resource)
total_pool = []
variant_pool = {}
len_inputs = len(resource_list)
for resource in resource_list:
rp_dct, fetched_resource = self.get(resource,
report_fetched=True, # <---- rp_dct has the strats/pipe_idxs as the keys on first level, then 'data' and 'json' on each strat level underneath
optional=True) # oh, and we make the resource fetching in get_strats optional so we can have optional inputs, but they won't be optional in the node block unless we want them to be
if not rp_dct:
len_inputs -= 1
continue
sub_pool = []
for strat in rp_dct.keys():
json_info = self.get_json(fetched_resource, strat)
cpac_prov = json_info['CpacProvenance']
sub_pool.append(cpac_prov)
if fetched_resource not in variant_pool:
variant_pool[fetched_resource] = []
if 'CpacVariant' in json_info:
for key, val in json_info['CpacVariant'].items():
if val not in variant_pool[fetched_resource]:
variant_pool[fetched_resource] += val
variant_pool[fetched_resource].append(f'NO-{val[0]}')
total_pool.append(sub_pool)
# TODO: right now total_pool is:
# TODO: [[[T1w:anat_ingress, desc-preproc_T1w:anatomical_init, desc-preproc_T1w:acpc_alignment], [T1w:anat_ingress,desc-preproc_T1w:anatomical_init]],
# TODO: [[T1w:anat_ingress, desc-preproc_T1w:anatomical_init, desc-preproc_T1w:acpc_alignment, desc-brain_mask:brain_mask_afni], [T1w:anat_ingress, desc-preproc_T1w:anatomical_init, desc-brain_mask:brain_mask_afni]]]
# TODO: and the code below thinks total_pool is a list of lists, like [[pipe_idx, pipe_idx], [pipe_idx, pipe_idx, pipe_idx], etc.]
# TODO: and the actual resource is encoded in the tag: of the last item, every time!
# keying the strategies to the resources, inverting it
if len_inputs > 1:
strats = itertools.product(*total_pool)
# we now currently have "strats", the combined permutations of all the strategies, as a list of tuples, each tuple combining one version of input each, being one of the permutations.
# OF ALL THE DIFFERENT INPUTS. and they are tagged by their fetched inputs with {name}:{strat}.
# so, each tuple has ONE STRAT FOR EACH INPUT, so if there are three inputs, each tuple will have 3 items.
new_strats = {}
# get rid of duplicates - TODO: refactor .product
strat_str_list = []
strat_list_list = []
for strat_tuple in strats:
strat_list = list(copy.deepcopy(strat_tuple))
strat_str = str(strat_list)
if strat_str not in strat_str_list:
strat_str_list.append(strat_str)
strat_list_list.append(strat_list)
for strat_list in strat_list_list:
json_dct = {}
for strat in strat_list:
# strat is a prov list for a single resource/input
strat_resource, strat_idx = \
self.generate_prov_string(strat)
strat_json = self.get_json(strat_resource,
strat=strat_idx)
json_dct[strat_resource] = strat_json
drop = False
if linked_resources:
for linked in linked_resources: # <--- 'linked' is each tuple
if drop:
break
for xlabel in linked:
if drop:
break
xjson = copy.deepcopy(json_dct[xlabel])
for ylabel in linked:
if xlabel == ylabel:
continue
yjson = copy.deepcopy(json_dct[ylabel])
if 'CpacVariant' not in xjson:
xjson['CpacVariant'] = {}
if 'CpacVariant' not in yjson:
yjson['CpacVariant'] = {}
current_strat = []
for key, val in xjson['CpacVariant'].items():
if isinstance(val, list):
current_strat.append(val[0])
else:
current_strat.append(val)
current_spread = list(set(variant_pool[xlabel]))
for spread_label in current_spread:
if 'NO-' in spread_label:
continue
if spread_label not in current_strat:
current_strat.append(f'NO-{spread_label}')
other_strat = []
for key, val in yjson['CpacVariant'].items():
if isinstance(val, list):
other_strat.append(val[0])
else:
other_strat.append(val)
other_spread = list(set(variant_pool[ylabel]))
for spread_label in other_spread:
if 'NO-' in spread_label:
continue
if spread_label not in other_strat:
other_strat.append(f'NO-{spread_label}')
for variant in current_spread:
in_current_strat = False
in_other_strat = False
in_other_spread = False
if variant is None:
in_current_strat = True
if None in other_spread:
in_other_strat = True
if variant in current_strat:
in_current_strat = True
if variant in other_strat:
in_other_strat = True
if variant in other_spread:
in_other_spread = True
if not in_other_strat:
if in_other_spread:
if in_current_strat:
drop = True
break
if in_other_strat:
if in_other_spread:
if not in_current_strat:
drop = True
break
if drop:
break
if drop:
continue
# make the merged strat label from the multiple inputs
# strat_list is actually the merged CpacProvenance lists
pipe_idx = str(strat_list)
new_strats[pipe_idx] = ResourcePool() # <----- new_strats is A DICTIONARY OF RESOURCEPOOL OBJECTS!
new_strats[pipe_idx].rpool['json'] = {}
new_strats[pipe_idx].rpool['json']['CpacProvenance'] = strat_list
# now just invert resource:strat to strat:resource for each resource:strat
for cpac_prov in strat_list:
resource, strat = self.generate_prov_string(cpac_prov)
resource_strat_dct = self.rpool[resource][strat] # <----- remember, this is the dct of 'data' and 'json'.
new_strats[pipe_idx].rpool[resource] = resource_strat_dct # <----- new_strats is A DICTIONARY OF RESOURCEPOOL OBJECTS! each one is a new slice of the resource pool combined together.
self.pipe_list.append(pipe_idx)
if 'CpacVariant' in resource_strat_dct['json']:
if 'CpacVariant' not in new_strats[pipe_idx].rpool['json']:
new_strats[pipe_idx].rpool['json']['CpacVariant'] = {}
for younger_resource, variant_list in resource_strat_dct['json']['CpacVariant'].items():
if younger_resource not in new_strats[pipe_idx].rpool['json']['CpacVariant']:
new_strats[pipe_idx].rpool['json']['CpacVariant'][younger_resource] = variant_list
else:
new_strats = {}
for resource_strat_list in total_pool: # total_pool will have only one list of strats, for the one input
for cpac_prov in resource_strat_list: # <------- cpac_prov here doesn't need to be modified, because it's not merging with other inputs
resource, pipe_idx = self.generate_prov_string(cpac_prov)
resource_strat_dct = self.rpool[resource][pipe_idx] # <----- remember, this is the dct of 'data' and 'json'.
new_strats[pipe_idx] = ResourcePool(rpool={resource: resource_strat_dct}) # <----- again, new_strats is A DICTIONARY OF RESOURCEPOOL OBJECTS!
new_strats[pipe_idx].rpool['json'] = resource_strat_dct['json'] # TODO: WARNING- THIS IS A LEVEL HIGHER THAN THE ORIGINAL 'JSON' FOR EASE OF ACCESS IN CONNECT_BLOCK WITH THE .GET(JSON)
new_strats[pipe_idx].rpool['json']['CpacProvenance'] = cpac_prov
return new_strats
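    # get_strats, in short: it returns {pipe_idx: ResourcePool}, where each
    # ResourcePool is a one-strategy slice (a "strat_pool") holding exactly
    # one version of every requested input plus a merged 'json' entry, and
    # the CpacVariant bookkeeping above drops permutations that would mix
    # incompatible forks of linked inputs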
def derivative_xfm(self, wf, label, connection, json_info, pipe_idx,
pipe_x):
if label in self.xfm:
json_info = dict(json_info)
# get the bold-to-template transform from the current strat_pool
# info
xfm_idx = None
xfm_label = 'from-bold_to-template_mode-image_xfm'
for entry in json_info['CpacProvenance']:
if isinstance(entry, list):
if entry[-1].split(':')[0] == xfm_label:
xfm_prov = entry
xfm_idx = self.generate_prov_string(xfm_prov)[1]
break
# but if the resource doesn't have the bold-to-template transform
# in its provenance/strategy, find the appropriate one for this
# current pipe_idx/strat
            if not xfm_idx:
                xfm_info = []
                # use a distinct loop variable so the pipe_idx parameter of
                # this method is not shadowed
                for entry_idx, entry in self.get(xfm_label).items():
                    xfm_info.append(
                        (entry_idx, entry['json']['CpacProvenance']))
            else:
                xfm_info = [(xfm_idx, xfm_prov)]
for num, xfm_entry in enumerate(xfm_info):
xfm_idx, xfm_prov = xfm_entry
reg_tool = check_prov_for_regtool(xfm_prov)
xfm = transform_derivative(f'{label}_xfm_{pipe_x}_{num}',
label, reg_tool, self.num_cpus,
self.num_ants_cores,
ants_interp=self.ants_interp,
fsl_interp=self.fsl_interp,
opt=None)
wf.connect(connection[0], connection[1],
xfm, 'inputspec.in_file')
node, out = self.get_data("T1w_brain_template_deriv",
quick_single=True)
wf.connect(node, out, xfm, 'inputspec.reference')
node, out = self.get_data('from-bold_to-template_mode-image_xfm',
pipe_idx=xfm_idx)
wf.connect(node, out, xfm, 'inputspec.transform')
label = f'space-template_{label}'
new_prov = json_info['CpacProvenance'] + xfm_prov
json_info['CpacProvenance'] = new_prov
                new_pipe_idx = self.generate_prov_string(new_prov)[1]
self.set_data(label, xfm, 'outputspec.out_file', json_info,
new_pipe_idx, f'{label}_xfm_{num}', fork=True)
return wf
def post_process(self, wf, label, connection, json_info, pipe_idx, pipe_x,
outs):
input_type = 'func_derivative'
if 'centrality' in label or 'lfcd' in label:
input_type = 'func_derivative_multi'
if 'centrality' in label or 'lfcd' in label:
mask = 'template_specification_file'
elif 'space-template' in label:
mask = 'space-template_res-derivative_desc-bold_mask'
else:
mask = 'space-bold_desc-brain_mask'
mask_idx = None
for entry in json_info['CpacProvenance']:
if isinstance(entry, list):
if entry[-1].split(':')[0] == mask:
mask_prov = entry
mask_idx = self.generate_prov_string(mask_prov)[1]
break
if self.run_smoothing:
if label in self.smooth:
for smooth_opt in self.smooth_opts:
sm = spatial_smoothing(f'{label}_smooth_{smooth_opt}_'
f'{pipe_x}',
self.fwhm, input_type, smooth_opt)
wf.connect(connection[0], connection[1],
sm, 'inputspec.in_file')
node, out = self.get_data(mask, pipe_idx=mask_idx,
quick_single=mask_idx is None)
wf.connect(node, out, sm, 'inputspec.mask')
if 'desc-' not in label:
if 'space-' in label:
for tag in label.split('_'):
if 'space-' in tag:
smlabel = label.replace(tag,
f'{tag}_desc-sm')
break
else:
smlabel = f'desc-sm_{label}'
else:
for tag in label.split('_'):
if 'desc-' in tag:
newtag = f'{tag}-sm'
smlabel = label.replace(tag, newtag)
break
self.set_data(smlabel, sm, 'outputspec.out_file',
json_info, pipe_idx,
f'spatial_smoothing_{smooth_opt}',
fork=True)
self.set_data('fwhm', sm, 'outputspec.fwhm', json_info,
pipe_idx, f'spatial_smoothing_{smooth_opt}',
fork=True)
        if self.run_zscoring:
            if 'desc-' not in label:
                if 'space-template' in label:
                    new_label = label.replace('space-template',
                                              'space-template_desc-zstd')
                else:
                    new_label = f'desc-zstd_{label}'
            else:
                for tag in label.split('_'):
                    if 'desc-' in tag:
                        newtag = f'{tag}-zstd'
                        new_label = label.replace(tag, newtag)
                        break
            # check membership against the *original* label; new_label is
            # only the output name for the z-scored copy
            if label in self.zscore:
                zstd = z_score_standardize(f'{label}_zstd_{pipe_x}',
                                           input_type)
                wf.connect(connection[0], connection[1],
                           zstd, 'inputspec.in_file')
                node, out = self.get_data(mask, pipe_idx=mask_idx,
                                          quick_single=mask_idx is None)
                wf.connect(node, out, zstd, 'inputspec.mask')
                self.set_data(new_label, zstd, 'outputspec.out_file',
                              json_info, pipe_idx, 'zscore_standardize',
                              fork=True)
elif label in self.fisher_zscore:
zstd = fisher_z_score_standardize(f'{label}_zstd_{pipe_x}',
label, input_type)
wf.connect(connection[0], connection[1],
zstd, 'inputspec.correlation_file')
# if the output is 'desc-MeanSCA_correlations', we want
# 'desc-MeanSCA_timeseries'
oned = label.replace('correlations', 'timeseries')
node, out = outs[oned]
wf.connect(node, out, zstd, 'inputspec.timeseries_oned')
self.set_data(new_label, zstd, 'outputspec.out_file',
json_info, pipe_idx,
'fisher_zscore_standardize',
fork=True)
return wf
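    # naming convention implemented above: smoothing adds '-sm' to the desc-
    # tag (or a new 'desc-sm' tag) and z-scoring adds '-zstd' (or
    # 'desc-zstd'), so e.g. 'space-template_alff' can fan out to
    # 'space-template_desc-sm_alff' and 'space-template_desc-zstd_alff'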
def gather_pipes(self, wf, cfg, all=False, add_incl=None, add_excl=None):
# TODO: cpac_outputs.csv etc
# TODO: might be better to do an inclusion instead
non_sink = ['scan', 'TR', 'tpattern', 'start_tr', 'stop_tr',
'pe_direction', 'subject', 'atlas_name', 'scan_params',
'deltaTE', 'diff_phase_dwell', 'dwell_asym_ratio',
'diffphase_scan_params', 'diffmag_scan_params']
excl = ['T1w', 'bold', 'motion-basefile',
'diffphase', 'diffmag', 'epi']
substring_excl = []
bold_descs = ['desc-cleaned', 'desc-brain', 'desc-motion',
'desc-preproc']
config_paths = ['T1w_ACPC_template', 'T1w_brain_ACPC_template',
'unet_model', 'T1w_brain_template', 'T1w_template',
'T1w_brain_template_mask',
'T1w_brain_template_symmetric',
'T1w_template_symmetric',
'dilated_symmetric_brain_mask',
'dilated_symmetric_brain_mask_for_template',
'T1w_brain_template_symmetric_for_resample',
'T1w_template_symmetric_for_resample', 'ref_mask',
'template_for_resample',
'T1w_brain_template_for_func',
'T1w_template_for_func',
'template_epi', 'template_epi_mask',
'lateral_ventricles_mask', 'eye_mask_path',
'EPI_template', 'EPI_template_mask',
'EPI_template_deriv', 'EPI_template_for_resample',
'EPI_template_funcreg', 'T1w_brain_template_deriv',
'T1w_template_deriv', 'T1w_brain_template_funcreg',
'T1w_template_funcreg',
'T1w_template_symmetric_funcreg',
'T1w_brain_template_symmetric_funcreg',
'T1w_brain_template_for_resample',
'T1w_template_for_resample']
excl += non_sink
excl += config_paths
if add_excl:
excl += add_excl
if not cfg.pipeline_setup['output_directory']['write_debugging_outputs']:
excl.append('motion-basefile')
substring_excl.append(['desc-reginput', 'bold'])
if not cfg.pipeline_setup['output_directory']['write_func_outputs']:
avail_bolds = []
for resource in self.rpool.keys():
if resource.split('_')[-1] != 'bold':
continue
for bold_desc in bold_descs:
if bold_desc in resource:
if bold_desc not in avail_bolds:
avail_bolds.append(bold_desc)
for bold in bold_descs:
if bold in avail_bolds:
bold_descs.remove(bold)
break
for bold in bold_descs:
substring_excl.append([bold, 'bold'])
anat = ['T1w', 'probseg', 'T1w-template']
func = ['bold', 'timeseries', 'alff', 'falff', 'reho', 'vmhc',
'correlations', 'statmap', 'regressors', 'degree-centrality',
'eigen-centrality', 'lfcd']
motions = ['motion', 'movement', 'coordinate', 'displacement',
'dvars', 'power-params']
qc_anat = ['T1w-axial-qc',
'T1w-sagittal-qc',
'dseg-axial-qc',
'dseg-sagittal-qc']
anat += qc_anat
qc_func = ['bold-axial-qc',
'bold-sagittal-qc',
'bold-carpet-qc',
'framewise-displacement-jenkinson-plot-qc',
'movement-parameters-trans-qc',
'movement-parameters-rot-qc',
'bold-snr-axial-qc',
'bold-snr-sagittal-qc',
'bold-snr-hist-qc',
'bold-snr-qc']
func += qc_func
if all:
excl = non_sink
for resource in self.rpool.keys():
# TODO: cpac_outputs.csv etc
if resource in excl:
continue
            drop = False
            for substring_list in substring_excl:
                # only drop the resource if ALL substrings in the list match
                if all(substring in resource
                       for substring in substring_list):
                    drop = True
                    break
            if drop:
                continue
subdir = 'other'
if resource.split('_')[-1] in anat:
subdir = 'anat'
#TODO: get acq- etc.
elif resource.split('_')[-1] in func:
subdir = 'func'
#TODO: other stuff like acq- etc.
elif resource.split('_')[-1] == 'mask':
if 'space-T1w' in resource:
subdir = 'anat'
if 'label-CSF' in resource or 'label-GM' in resource or \
'label-WM' in resource:
subdir = 'anat'
if 'space-bold' in resource:
subdir = 'func'
elif resource.split('_')[-1] == 'xfm':
if 'from-T1w' in resource:
subdir = 'anat'
if 'template_to-T1w' in resource:
subdir = 'anat'
if 'from-bold' in resource:
subdir = 'func'
if 'template_to-bold' in resource:
subdir = 'func'
else:
for tag in motions:
if tag in resource:
subdir = 'func'
for pipe_idx in self.rpool[resource]:
unique_id = self.get_name()
out_dir = cfg.pipeline_setup['output_directory']['path']
pipe_name = cfg.pipeline_setup['pipeline_name']
container = os.path.join(f'cpac_{pipe_name}', unique_id)
filename = f'{unique_id}_{resource}'
out_path = os.path.join(out_dir, container, subdir, filename)
out_dct = {
'unique_id': unique_id,
'out_dir': out_dir,
'container': container,
'subdir': subdir,
'filename': filename,
'out_path': out_path
}
self.rpool[resource][pipe_idx]['out'] = out_dct
# TODO: have to link the pipe_idx's here. and call up 'desc-preproc_T1w' from a Sources in a json and replace. here.
# TODO: can do the pipeline_description.json variants here too!
for resource in self.rpool.keys():
# TODO: cpac_outputs.csv etc
if resource in excl:
continue
            drop = False
            for substring_list in substring_excl:
                # only drop the resource if ALL substrings in the list match
                if all(substring in resource
                       for substring in substring_list):
                    drop = True
                    break
            if drop:
                continue
if not all:
if 'symtemplate' in resource:
continue
num_variant = 0
if len(self.rpool[resource]) == 1:
num_variant = ""
for pipe_idx in self.rpool[resource]:
pipe_x = self.get_pipe_number(pipe_idx)
try:
num_variant += 1
except TypeError:
pass
json_info = self.rpool[resource][pipe_idx]['json']
out_dct = self.rpool[resource][pipe_idx]['out']
if out_dct['subdir'] == 'other' and not all:
continue
unique_id = out_dct['unique_id']
if num_variant:
for key in out_dct['filename'].split('_'):
if 'desc-' in key:
out_dct['filename'] = out_dct['filename'
].replace(key, f'{key}-{num_variant}')
resource_idx = resource.replace(key,
f'{key}-{num_variant}')
break
else:
suff = resource.split('_')[-1]
newdesc_suff = f'desc-{num_variant}_{suff}'
resource_idx = resource.replace(suff,
newdesc_suff)
else:
resource_idx = resource
id_string = pe.Node(Function(input_names=['unique_id',
'resource',
'scan_id',
'atlas_id',
'fwhm'],
output_names=['out_filename'],
function=create_id_string),
name=f'id_string_{resource_idx}_{pipe_x}')
id_string.inputs.unique_id = unique_id
id_string.inputs.resource = resource_idx
# grab the iterable scan ID
if out_dct['subdir'] == 'func':
node, out = self.rpool['scan']["['scan:func_ingress']"][
'data']
wf.connect(node, out, id_string, 'scan_id')
# grab the FWHM if smoothed
for tag in resource.split('_'):
if 'desc-' in tag and '-sm' in tag:
fwhm_idx = pipe_idx.replace(f'{resource}:', 'fwhm:')
node, out = self.rpool['fwhm'][fwhm_idx]['data']
wf.connect(node, out, id_string, 'fwhm')
break
atlas_suffixes = ['timeseries', 'correlations', 'statmap']
# grab the iterable atlas ID
if resource.split('_')[-1] in atlas_suffixes:
atlas_idx = pipe_idx.replace(resource, 'atlas_name')
# need the single quote and the colon inside the double
# quotes - it's the encoded pipe_idx
#atlas_idx = new_idx.replace(f"'{temp_rsc}:",
# "'atlas_name:")
if atlas_idx in self.rpool['atlas_name']:
node, out = self.rpool['atlas_name'][atlas_idx][
'data']
wf.connect(node, out, id_string, 'atlas_id')
elif 'atlas-' in resource:
for tag in resource.split('_'):
if 'atlas-' in tag:
atlas_id = tag.replace('atlas-', '')
id_string.inputs.atlas_id = atlas_id
else:
warnings.warn(str(
LookupError("\n[!] No atlas ID found for "
f"{out_dct['filename']}.\n")))
nii_name = pe.Node(Rename(), name=f'nii_{resource_idx}_'
f'{pipe_x}')
nii_name.inputs.keep_ext = True
wf.connect(id_string, 'out_filename',
nii_name, 'format_string')
node, out = self.rpool[resource][pipe_idx]['data']
wf.connect(node, out, nii_name, 'in_file')
write_json_imports = ['import os', 'import json']
write_json = pe.Node(Function(input_names=['json_data',
'filename'],
output_names=['json_file'],
function=write_output_json,
imports=write_json_imports),
name=f'json_{resource_idx}_{pipe_x}')
write_json.inputs.json_data = json_info
wf.connect(id_string, 'out_filename', write_json, 'filename')
ds = pe.Node(DataSink(), name=f'sinker_{resource_idx}_'
f'{pipe_x}')
ds.inputs.parameterization = False
ds.inputs.base_directory = out_dct['out_dir']
ds.inputs.encrypt_bucket_keys = cfg.pipeline_setup[
'Amazon-AWS']['s3_encryption']
ds.inputs.container = out_dct['container']
if cfg.pipeline_setup['Amazon-AWS'][
'aws_output_bucket_credentials']:
ds.inputs.creds_path = cfg.pipeline_setup['Amazon-AWS'][
'aws_output_bucket_credentials']
wf.connect(nii_name, 'out_file',
ds, f'{out_dct["subdir"]}.@data')
wf.connect(write_json, 'json_file',
ds, f'{out_dct["subdir"]}.@json')
class NodeBlock(object):
def __init__(self, node_block_functions):
if not isinstance(node_block_functions, list):
node_block_functions = [node_block_functions]
self.node_blocks = {}
for node_block_function in node_block_functions: # <---- sets up the NodeBlock object in case you gave it a list of node blocks instead of a single one - for option forking.
init_dct = self.grab_docstring_dct(node_block_function.__doc__)
name = init_dct['name']
self.name = name
self.node_blocks[name] = {}
for key, val in init_dct.items():
self.node_blocks[name][key] = val
self.node_blocks[name]['block_function'] = node_block_function
#TODO: fix/replace below
self.outputs = {}
for out in init_dct['outputs']:
self.outputs[out] = None
self.options = ['base']
if 'options' in init_dct:
self.options = init_dct['options']
def get_name(self):
return self.name
def grab_docstring_dct(self, fn_docstring):
init_dct_schema = ['name', 'config', 'switch', 'option_key',
'option_val', 'inputs', 'outputs']
if 'Node Block:' in fn_docstring:
fn_docstring = fn_docstring.split('Node Block:')[1]
fn_docstring = fn_docstring.replace('\n', '').replace(' ', '')
#dct = json.loads(fn_docstring.replace('\n', '').replace(' ', ''))
dct = ast.literal_eval(fn_docstring)
#try:
# dct = json.loads(fn_docstring.replace('\n', '').replace(' ', ''))
#except Exception as e:
# raise Exception('\n\n[!] Node block docstring error.\n\n'
# f'Docstring:\n{fn_docstring}\n\n')
for key in init_dct_schema:
if key not in dct.keys():
raise Exception('\n[!] Developer info: At least one of the '
'required docstring keys in your node block '
'is missing.\n\nNode block docstring keys:\n'
f'{init_dct_schema}\n\nYou provided:\n'
f'{dct.keys()}\n\nDocstring:\n{fn_docstring}'
'\n\n')
return dct
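    # illustrative node block docstring in the format grab_docstring_dct
    # parses with ast.literal_eval (field names come from init_dct_schema
    # above; the block name and values here are made up):
    #   '''
    #   {"name": "brain_mask_afni",
    #    "config": ["anatomical_preproc", "brain_extraction"],
    #    "switch": ["run"],
    #    "option_key": "using",
    #    "option_val": "3dSkullStrip",
    #    "inputs": ["desc-preproc_T1w"],
    #    "outputs": ["space-T1w_desc-brain_mask"]}
    #   '''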
def check_null(self, val):
if isinstance(val, str):
val = None if val.lower() == 'none' else val
return val
def check_output(self, outputs, label, name):
if label not in outputs:
raise Exception('\n[!] Output name in the block function does '
'not match the outputs list in Node Block '
f'{name}\n')
def grab_tiered_dct(self, cfg, key_list):
cfg_dct = cfg
for key in key_list:
cfg_dct = cfg_dct.__getitem__(key)
return cfg_dct
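    # illustrative use (keys made up): grab_tiered_dct(cfg,
    # ['anatomical_preproc', 'brain_extraction', 'run']) walks
    # cfg['anatomical_preproc']['brain_extraction']['run'] one key at a time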
def connect_block(self, wf, cfg, rpool):
all_opts = []
for name, block_dct in self.node_blocks.items():
opts = []
config = self.check_null(block_dct['config'])
option_key = self.check_null(block_dct['option_key'])
option_val = self.check_null(block_dct['option_val'])
            if option_key and option_val:
                if not isinstance(option_key, list):
                    option_key = [option_key]
                if not isinstance(option_val, list):
                    option_val = [option_val]
                if config:
                    key_list = config + option_key
                else:
                    key_list = option_key
                if 'USER-DEFINED' in option_val:
                    # load custom config data into each 'opt'
                    opts = self.grab_tiered_dct(cfg, key_list)
                else:
                    for option in option_val:
                        if option in self.grab_tiered_dct(cfg, key_list):  # <---- goes over the option_vals in the node block docstring, and checks if the user's pipeline config included it in the forking list
                            opts.append(option)
            else:  # AND, if there are multiple option-val's (in a list) in the docstring, it gets iterated below in 'for opt in option' etc. AND THAT'S WHEN YOU HAVE TO DELINEATE WITHIN THE NODE BLOCK CODE!!!
                opts = [None]
            all_opts += opts
for name, block_dct in self.node_blocks.items(): # <--- iterates over either the single node block in the sequence, or a list of node blocks within the list of node blocks, i.e. for option forking.
switch = self.check_null(block_dct['switch'])
config = self.check_null(block_dct['config'])
option_key = self.check_null(block_dct['option_key'])
option_val = self.check_null(block_dct['option_val'])
inputs = self.check_null(block_dct['inputs'])
outputs = self.check_null(block_dct['outputs'])
block_function = block_dct['block_function']
opts = []
            if option_key and option_val:
                if not isinstance(option_key, list):
                    option_key = [option_key]
                if not isinstance(option_val, list):
                    option_val = [option_val]
                if config:
                    key_list = config + option_key
                else:
                    key_list = option_key
                if 'USER-DEFINED' in option_val:
                    # load custom config data into each 'opt'
                    opts = self.grab_tiered_dct(cfg, key_list)
                else:
                    for option in option_val:
                        if option in self.grab_tiered_dct(cfg, key_list):  # <---- goes over the option_vals in the node block docstring, and checks if the user's pipeline config included it in the forking list
                            opts.append(option)
            else:  # AND, if there are multiple option-val's (in a list) in the docstring, it gets iterated below in 'for opt in option' etc. AND THAT'S WHEN YOU HAVE TO DELINEATE WITHIN THE NODE BLOCK CODE!!!
                opts = [None]  # THIS ALSO MEANS the multiple option-val's in docstring node blocks can be entered once in the entire node-block sequence, not in a list of multiples
if not opts:
# for node blocks where the options are split into different
# block functions - opts will be empty for non-selected
# options, and would waste the get_strats effort below
continue
if not switch:
switch = [True]
else:
if config:
try:
key_list = config + switch
except TypeError:
raise Exception("\n\n[!] Developer info: Docstring error "
f"for {name}, make sure the 'config' or "
"'switch' fields are lists.\n\n")
else:
key_list = switch
switch = self.grab_tiered_dct(cfg, key_list)
if not isinstance(switch, list):
switch = [switch]
#print(f'switch and opts for {name}: {switch} --- {opts}')
if True in switch:
print(f"Connecting {name}...\n")
for pipe_idx, strat_pool in rpool.get_strats(inputs).items(): # strat_pool is a ResourcePool like {'desc-preproc_T1w': { 'json': info, 'data': (node, out) }, 'desc-brain_mask': etc.}
fork = False in switch # keep in mind rpool.get_strats(inputs) = {pipe_idx1: {'desc-preproc_T1w': etc.}, pipe_idx2: {..} }
for opt in opts: # it's a dictionary of ResourcePools called strat_pools, except those sub-ResourcePools only have one level! no pipe_idx strat keys.
# remember, you can get 'data' or 'json' from strat_pool with member functions
# strat_pool has all of the JSON information of all the inputs!
# so when we set_data below for the TOP-LEVEL MAIN RPOOL (not the strat_pool), we can generate new merged JSON information for each output.
# particularly, our custom 'CpacProvenance' field.
node_name = name
pipe_x = rpool.get_pipe_number(pipe_idx)
wf, outs = block_function(wf, cfg, strat_pool,
pipe_x, opt)
if not outs:
continue
if opt and len(option_val) > 1:
node_name = f'{node_name}_{opt}'
elif opt and 'USER-DEFINED' in option_val:
node_name = f'{node_name}_{opt["Name"]}'
if cfg.pipeline_setup['Debugging']['verbose']:
print('\n=======================')
print(f'Node name: {node_name}')
prov_dct = \
rpool.get_resource_strats_from_prov(ast.literal_eval(pipe_idx))
for key, val in prov_dct.items():
print('-------------------')
print(f'Input - {key}:')
sub_prov_dct = \
rpool.get_resource_strats_from_prov(val)
for sub_key, sub_val in sub_prov_dct.items():
sub_sub_dct = \
rpool.get_resource_strats_from_prov(sub_val)
print(f' sub-input - {sub_key}:')
print(f' prov = {sub_val}')
print(f' sub_sub_inputs = {sub_sub_dct.keys()}')
for label, connection in outs.items():
self.check_output(outputs, label, name)
new_json_info = copy.deepcopy(strat_pool.get('json'))
new_json_info['Sources'] = [x for x in strat_pool.get_entire_rpool() if x != 'json']
if strat_pool.check_rpool(label):
# so we won't get extra forks if we are
# merging strats (multiple inputs) plus the
# output name is one of the input names
old_pipe_prov = list(strat_pool.get_cpac_provenance(label))
new_json_info['CpacProvenance'] = old_pipe_prov
pipe_idx = strat_pool.generate_prov_string(old_pipe_prov)[1]
if fork or len(opts) > 1 or len(all_opts) > 1:
if 'CpacVariant' not in new_json_info:
new_json_info['CpacVariant'] = {}
raw_label = rpool.get_raw_label(label)
if raw_label not in new_json_info['CpacVariant']:
new_json_info['CpacVariant'][raw_label] = []
new_json_info['CpacVariant'][raw_label].append(node_name)
rpool.set_data(label,
connection[0],
connection[1],
new_json_info,
pipe_idx, node_name, fork)
if rpool.func_reg:
wf = rpool.derivative_xfm(wf, label,
connection,
new_json_info,
pipe_idx,
pipe_x)
wf = rpool.post_process(wf, label, connection,
new_json_info, pipe_idx,
pipe_x, outs)
return wf
def wrap_block(node_blocks, interface, wf, cfg, strat_pool, pipe_num, opt):
"""Wrap a list of node block functions to make them easier to use within
other node blocks.
Example usage:
# This calls the 'bold_mask_afni' and 'bold_masking' node blocks to
# skull-strip an EPI field map, without having to invoke the NodeBlock
# connection system.
# The interface dictionary tells wrap_block to set the EPI field map
# in the parent node block's throw-away strat_pool as 'bold', so that
# the 'bold_mask_afni' and 'bold_masking' node blocks will see that as
# the 'bold' input.
# It also tells wrap_block to set the 'desc-brain_bold' output of
# the 'bold_masking' node block to 'opposite_pe_epi_brain' (what it
# actually is) in the parent node block's strat_pool, which gets
# returned.
# Note 'bold' and 'desc-brain_bold' (all on the left side) are the
# labels that 'bold_mask_afni' and 'bold_masking' understand/expect
# through their interfaces and docstrings.
# The right-hand side (the values of the 'interface' dictionary) are
# what 'make sense' within the current parent node block - in this
# case, the distortion correction node block dealing with field maps.
interface = {'bold': (match_epi_fmaps_node, 'opposite_pe_epi'),
'desc-brain_bold': 'opposite_pe_epi_brain'}
wf, strat_pool = wrap_block([bold_mask_afni, bold_masking],
interface, wf, cfg, strat_pool,
pipe_num, opt)
...further downstream in the parent node block:
node, out = strat_pool.get_data('opposite_pe_epi_brain')
# The above line will connect the output of the 'bold_masking' node
# block (which is the skull-stripped version of 'opposite_pe_epi') to
# the next node.
"""
    for block in node_blocks:
        new_pool = copy.deepcopy(strat_pool)
        for in_label, val in interface.items():
            if isinstance(val, tuple):
                # inject the given (node, output) under the label the
                # wrapped node blocks expect as input
                new_pool.set_data(in_label, val[0], val[1], {}, "", "")
        wf, outputs = block(wf, cfg, new_pool, pipe_num, opt)
        for out, val in outputs.items():
            if out in interface and isinstance(interface[out], str):
                # hand the output back to the parent strat_pool under the
                # label given in the interface mapping
                strat_pool.set_data(interface[out], val[0], val[1],
                                    {}, "", "")
    return (wf, strat_pool)
def ingress_raw_anat_data(wf, rpool, cfg, data_paths, unique_id, part_id,
ses_id):
if 'creds_path' not in data_paths:
data_paths['creds_path'] = None
anat_flow = create_anat_datasource(f'anat_gather_{part_id}_{ses_id}')
anat_flow.inputs.inputnode.set(
subject=part_id,
anat=data_paths['anat'],
creds_path=data_paths['creds_path'],
dl_dir=cfg.pipeline_setup['working_directory']['path'],
img_type='anat'
)
rpool.set_data('T1w', anat_flow, 'outputspec.anat', {},
"", "anat_ingress")
return rpool
def ingress_raw_func_data(wf, rpool, cfg, data_paths, unique_id, part_id,
ses_id):
func_paths_dct = data_paths['func']
func_wf = create_func_datasource(func_paths_dct,
f'func_ingress_{part_id}_{ses_id}')
func_wf.inputs.inputnode.set(
subject=part_id,
creds_path=data_paths['creds_path'],
dl_dir=cfg.pipeline_setup['working_directory']['path']
)
func_wf.get_node('inputnode').iterables = \
("scan", list(func_paths_dct.keys()))
rpool.set_data('subject', func_wf, 'outputspec.subject', {}, "",
"func_ingress")
rpool.set_data('bold', func_wf, 'outputspec.rest', {}, "", "func_ingress")
rpool.set_data('scan', func_wf, 'outputspec.scan', {}, "", "func_ingress")
rpool.set_data('scan_params', func_wf, 'outputspec.scan_params', {}, "",
"scan_params_ingress")
wf, rpool, diff, blip, fmap_rp_list = \
ingress_func_metadata(wf, cfg, rpool, data_paths, part_id,
data_paths['creds_path'], ses_id)
return (wf, rpool, diff, blip, fmap_rp_list)
def ingress_output_dir(cfg, rpool, unique_id, creds_path=None):
out_dir = cfg.pipeline_setup['output_directory']['path']
if cfg.pipeline_setup['output_directory']['pull_source_once']:
if os.path.isdir(cfg.pipeline_setup['output_directory']['path']):
if not os.listdir(cfg.pipeline_setup['output_directory']['path']):
if cfg.pipeline_setup['output_directory']['source_outputs_dir']:
out_dir = cfg.pipeline_setup['output_directory'][
'source_outputs_dir']
else:
out_dir = cfg.pipeline_setup['output_directory']['path']
else:
out_dir = cfg.pipeline_setup['output_directory']['path']
else:
if cfg.pipeline_setup['output_directory']['source_outputs_dir']:
out_dir = cfg.pipeline_setup['output_directory'][
'source_outputs_dir']
else:
if cfg.pipeline_setup['output_directory']['source_outputs_dir']:
out_dir = cfg.pipeline_setup['output_directory'][
'source_outputs_dir']
else:
out_dir = cfg.pipeline_setup['output_directory']['path']
    if os.path.isdir(out_dir):
        if not os.listdir(out_dir):
            print(f"\nOutput directory {out_dir} is empty - nothing to "
                  f"pull in, initializing.")
            return rpool
    else:
        print(f"\nOutput directory {out_dir} does not exist yet, "
              f"initializing.")
        return rpool
print(f"\nPulling outputs from {out_dir}.\n")
cpac_dir = os.path.join(out_dir,
f'cpac_{cfg.pipeline_setup["pipeline_name"]}',
unique_id)
cpac_dir_anat = os.path.join(cpac_dir, 'anat')
cpac_dir_func = os.path.join(cpac_dir, 'func')
exts = ['.nii', '.gz', '.mat', '.1D', '.txt', '.csv', '.rms']
all_output_dir = []
if os.path.isdir(cpac_dir_anat):
for filename in os.listdir(cpac_dir_anat):
for ext in exts:
if ext in filename:
all_output_dir.append(os.path.join(cpac_dir_anat,
filename))
if os.path.isdir(cpac_dir_func):
for filename in os.listdir(cpac_dir_func):
for ext in exts:
if ext in filename:
all_output_dir.append(os.path.join(cpac_dir_func,
filename))
for filepath in all_output_dir:
        filename = str(filepath).split("/")[-1]
        for ext in exts:
            filename = filename.replace(ext, '')
data_label = filename.split(unique_id)[1].lstrip('_')
if len(filename) == len(data_label):
raise Exception('\n\n[!] Possibly wrong participant or '
'session in this directory?\n\n'
f'Filepath: {filepath}\n\n')
if 'task-' in data_label:
for tag in data_label.split('_'):
if 'task-' in tag:
break
runtag = None
if 'run-' in data_label:
for runtag in data_label.split('_'):
if 'run-' in runtag:
break
data_label = data_label.replace(f'{tag}_', '')
if runtag:
data_label = data_label.replace(f'{runtag}_', '')
unique_data_label = str(data_label)
#if 'sub-' in data_label or 'ses-' in data_label:
# raise Exception('\n\n[!] Possibly wrong participant or '
# 'session in this directory?\n\nDirectory: '
# f'{cpac_dir_anat}\nFilepath: {filepath}\n\n')
        suffix = data_label.split('_')[-1]
        desc_val = None
        for tag in data_label.split('_'):
            if 'desc-' in tag:
                desc_val = tag
                break
jsonpath = str(filepath)
for ext in exts:
jsonpath = jsonpath.replace(ext, '')
jsonpath = f"{jsonpath}.json"
if not os.path.exists(jsonpath):
raise Exception('\n\n[!] No JSON found for file '
f'{filepath}.\n\n')
json_info = read_json(jsonpath)
        if 'CpacProvenance' in json_info:
            if desc_val:
                # it's a C-PAC output, let's check for pipe_idx/strat
                # integer suffixes in the desc- entries.
                only_desc = str(desc_val)
                if only_desc[-1].isdigit():
                    for idx in range(0, 3):
                        # let's stop at 3, please don't run >999 strategies okay?
                        if only_desc[-1].isdigit():
                            only_desc = only_desc[:-1]
                    if only_desc[-1] == '-':
                        only_desc = only_desc.rstrip('-')
                    else:
                        raise Exception('\n[!] Something went wrong with '
                                        'either reading in the output '
                                        'directory or when it was written '
                                        'out previously.\n\nGive this to '
                                        'your friendly local C-PAC '
                                        f'developer:\n\n{unique_data_label}\n')
                # remove the integer at the end of the desc-* variant, we
                # will get the unique pipe_idx from the CpacProvenance below
                data_label = data_label.replace(desc_val, only_desc)
            # preserve cpac provenance/pipe_idx
            pipe_idx = rpool.generate_prov_string(
                json_info['CpacProvenance'])[1]
            node_name = ""
        else:
            pipe_idx = ""
            node_name = f"{data_label}_ingress"
resource = data_label
ingress = create_general_datasource(f'gather_{unique_data_label}')
ingress.inputs.inputnode.set(
unique_id=unique_id,
data=filepath,
creds_path=creds_path,
dl_dir=cfg.pipeline_setup['working_directory']['path']
)
rpool.set_data(resource, ingress, 'outputspec.data', json_info,
pipe_idx, node_name, inject=True)
return rpool
def ingress_pipeconfig_paths(cfg, rpool, unique_id, creds_path=None):
# ingress config file paths
# TODO: pull this from some external list instead
# TODO: nah, even better: just loop through the config for .nii's
# TODO: may want to change the resource keys for each to include one level up in the YAML as well
templates_for_resampling = [
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.registration_workflows['anatomical_registration']['T1w_brain_template'], 'T1w_brain_template', 'resolution_for_anat'),
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.registration_workflows['anatomical_registration']['T1w_template'], 'T1w_template', 'resolution_for_anat'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg'], 'T1w_brain_template_funcreg', 'func_preproc_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_derivative_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg'], 'T1w_brain_template_deriv', 'func_derivative_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg'], 'T1w_template_funcreg', 'func_preproc_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_derivative_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg'], 'T1w_template_deriv', 'func_derivative_outputs'),
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_brain_template_symmetric'], 'T1w_brain_template_symmetric', 'resolution_for_anat'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg'], 'T1w_brain_template_symmetric_deriv', 'func_derivative_outputs'),
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_template_symmetric'], 'T1w_template_symmetric', 'resolution_for_anat'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg'], 'T1w_template_symmetric_deriv', 'func_derivative_outputs'),
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['dilated_symmetric_brain_mask'], 'template_dilated_symmetric_brain_mask', 'resolution_for_anat'),
(cfg.registration_workflows['anatomical_registration']['resolution_for_anat'], cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['ref_mask'], 'template_ref_mask', 'resolution_for_anat'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg'], 'T1w_brain_template_funcreg', 'func_preproc_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg'], 'T1w_template_funcreg', 'func_preproc_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['EPI_template']['EPI_template_funcreg'], 'EPI_template_funcreg', 'func_preproc_outputs'), # no difference of skull and only brain
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_derivative_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['EPI_template']['EPI_template_funcreg'], 'EPI_template_deriv', 'func_derivative_outputs'), # no difference of skull and only brain
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_derivative_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg'], 'T1w_brain_template_deriv', 'func_derivative_outputs'),
(cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_derivative_outputs'], cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg'], 'T1w_template_deriv', 'func_derivative_outputs')
]
if cfg.PyPEER['run']:
templates_for_resampling.append((cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution']['func_preproc_outputs'], cfg.PyPEER['eye_mask_path'], 'template_eye_mask', 'func_preproc_outputs'))
#Outputs.any.append("template_eye_mask")
# update resampled template to resource pool
for resolution, template, template_name, tag in templates_for_resampling:
if not template:
continue
if '$FSLDIR' in template:
template = template.replace('$FSLDIR', cfg.pipeline_setup[
'system_config']['FSLDIR'])
#if '${resolution_for_anat}' in template:
# template = template.replace('${resolution_for_anat}',
# cfg.registration_workflows[
# 'anatomical_registration'][
# 'resolution_for_anat'])
if '${func_resolution}' in template:
template = template.replace('func_resolution', tag)
resampled_template = pe.Node(Function(input_names=['resolution',
'template',
'template_name',
'tag'],
output_names=['resampled_template'],
function=resolve_resolution,
as_module=True),
name='resampled_' + template_name)
resampled_template.inputs.resolution = resolution
resampled_template.inputs.template = template
resampled_template.inputs.template_name = template_name
resampled_template.inputs.tag = tag
# the set_data below is set up a little differently, because we are
# injecting and also over-writing already-existing entries
# other alternative would have been to ingress into the
# resampled_template node from the already existing entries, but we
# didn't do that here
rpool.set_data(template_name,
resampled_template,
'resampled_template',
#{'CpacProvenance': [f'{template_name}:{template_name}_config_ingress']},
#f"['{template_name}:{template_name}_config_ingress']",
{}, "",
"template_resample") #, inject=True) # pipe_idx (after the blank json {}) should be the previous strat that you want deleted! because you're not connecting this the regular way, you have to do it manually
config_resource_paths = [
('CSF_path', cfg.segmentation['tissue_segmentation']['FSL-FAST']['use_priors']['CSF_path']),
('WM_path', cfg.segmentation['tissue_segmentation']['FSL-FAST']['use_priors']['WM_path']),
('GM_path', cfg.segmentation['tissue_segmentation']['FSL-FAST']['use_priors']['GM_path']),
('T1w_ACPC_template', cfg.anatomical_preproc['acpc_alignment']['T1w_ACPC_template']),
('T1w_brain_ACPC_template', cfg.anatomical_preproc['acpc_alignment']['T1w_brain_ACPC_template']),
('unet_model', cfg.anatomical_preproc['brain_extraction']['UNet']['unet_model']),
('T1w_brain_template', cfg.registration_workflows['anatomical_registration']['T1w_brain_template']),
('T1w_template', cfg.registration_workflows['anatomical_registration']['T1w_template']),
('T1w_brain_template_mask', cfg.registration_workflows['anatomical_registration']['T1w_brain_template_mask']),
('T1w_brain_template_symmetric', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_brain_template_symmetric']),
('T1w_template_symmetric', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_template_symmetric']),
('dilated_symmetric_brain_mask', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['dilated_symmetric_brain_mask']),
('T1w_brain_template_symmetric_for_resample', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_brain_template_symmetric_for_resample']),
('T1w_template_symmetric_for_resample', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['T1w_template_symmetric_for_resample']),
('dilated_symmetric_brain_mask_for_resample', cfg.voxel_mirrored_homotopic_connectivity['symmetric_registration']['dilated_symmetric_brain_mask_for_resample']),
('ref_mask', cfg.registration_workflows['anatomical_registration']['registration']['FSL-FNIRT']['ref_mask']),
('T1w_template_for_resample', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_for_resample']),
('EPI_template_for_resample', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['EPI_template']['EPI_template_for_resample']),
('T1w_brain_template_funcreg', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg']),
('T1w_brain_template_deriv', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg']),
('T1w_template_funcreg', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg']),
('T1w_template_deriv', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg']),
('T1w_brain_template_symmetric_deriv', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_brain_template_funcreg']),
('T1w_template_symmetric_deriv', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['T1_template']['T1w_template_funcreg']),
('EPI_template_funcreg', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['EPI_template']['EPI_template_funcreg']),
('EPI_template_deriv', cfg.registration_workflows['functional_registration']['func_registration_to_template']['target_template']['EPI_template']['EPI_template_funcreg']),
('EPI_template', cfg.registration_workflows['functional_registration']['EPI_registration']['EPI_template']),
('EPI_template_mask', cfg.registration_workflows['functional_registration']['EPI_registration']['EPI_template_mask']),
('lateral_ventricles_mask', cfg.nuisance_corrections['2-nuisance_regression']['lateral_ventricles_mask']),
('template_specification_file', cfg.network_centrality['template_specification_file'])
]
if cfg.PyPEER['run']:
config_resource_paths.append(
('eye_mask_path', cfg.PyPEER['eye_mask_path']))
for resource in config_resource_paths:
key = resource[0]
val = resource[1]
if rpool.check_rpool(key):
continue
if not val:
continue
if '$FSLDIR' in val:
val = val.replace('$FSLDIR', cfg.pipeline_setup['system_config']['FSLDIR'])
if '$priors_path' in val:
priors_path = cfg.segmentation['tissue_segmentation']['FSL-FAST']['use_priors']['priors_path']
if '$FSLDIR' in priors_path:
priors_path = priors_path.replace('$FSLDIR', cfg.pipeline_setup['system_config']['FSLDIR'])
val = val.replace('$priors_path', priors_path)
if '${resolution_for_anat}' in val:
val = val.replace('${resolution_for_anat}', cfg.registration_workflows['anatomical_registration']['resolution_for_anat'])
if '${func_resolution}' in val:
# functional registration
if 'funcreg' in key:
out_res = 'func_preproc_outputs'
# functional derivatives
else:
out_res = 'func_derivative_outputs'
val = val.replace('${func_resolution}', cfg.registration_workflows['functional_registration']['func_registration_to_template']['output_resolution'][out_res])
if val:
config_ingress = create_general_datasource(f'gather_{key}')
config_ingress.inputs.inputnode.set(
unique_id=unique_id,
data=val,
creds_path=creds_path,
dl_dir=cfg.pipeline_setup['working_directory']['path']
)
rpool.set_data(key, config_ingress, 'outputspec.data', {}, "",
f"{key}_config_ingress")
# templates, resampling from config
'''
template_keys = [
("anat", ["network_centrality", "template_specification_file"]),
("anat", ["nuisance_corrections", "2-nuisance_regression",
"lateral_ventricles_mask"]),
("anat",
["segmentation", "tissue_segmentation", "FSL-FAST", "use_priors",
"CSF_path"]),
("anat",
["segmentation", "tissue_segmentation", "FSL-FAST", "use_priors",
"GM_path"]),
("anat",
["segmentation", "tissue_segmentation", "FSL-FAST", "use_priors",
"WM_path"]),
("anat",
["segmentation", "tissue_segmentation", "Template_Based", "CSF"]),
("anat",
["segmentation", "tissue_segmentation", "Template_Based", "GRAY"]),
("anat",
["segmentation", "tissue_segmentation", "Template_Based", "WHITE"]),
("anat", ["anatomical_preproc", "acpc_alignment", "T1w_ACPC_template"]),
("anat", ["anatomical_preproc", "acpc_alignment", "T1w_brain_ACPC_template"])]
def get_nested_attr(c, template_key):
attr = getattr(c, template_key[0])
keys = template_key[1:]
def _get_nested(attr, keys):
if len(keys) > 1:
return (_get_nested(attr[keys[0]], keys[1:]))
elif len(keys):
return (attr[keys[0]])
else:
return (attr)
return (_get_nested(attr, keys))
def set_nested_attr(c, template_key, value):
attr = getattr(c, template_key[0])
keys = template_key[1:]
def _set_nested(attr, keys):
if len(keys) > 1:
return (_set_nested(attr[keys[0]], keys[1:]))
elif len(keys):
attr[keys[0]] = value
else:
return (attr)
return (_set_nested(attr, keys))
for key_type, key in template_keys:
attr = cfg.get_nested(cfg, key)
if isinstance(attr, str) or attr == None:
node = create_check_for_s3_node(
key[-1],
attr, key_type,
data_paths['creds_path'],
cfg.pipeline_setup['working_directory']['path'],
map_node=False
)
cfg.set_nested(cfg, key, node)
template_keys_in_list = [
("anat",
["segmentation", "tissue_segmentation", "ANTs_Prior_Based",
"template_brain_list"]),
("anat",
["segmentation", "tissue_segmentation", "ANTs_Prior_Based",
"template_segmentation_list"]),
]
for key_type, key in template_keys_in_list:
node = create_check_for_s3_node(
key[-1],
cfg.get_nested(cfg, key), key_type,
data_paths['creds_path'],
cfg.pipeline_setup['working_directory']['path'],
map_node=True
)
cfg.set_nested(cfg, key, node)
'''
return rpool
def initiate_rpool(wf, cfg, data_paths=None, part_id=None):
'''
data_paths format:
{'anat': '{T1w path}',
'creds_path': {None OR path to credentials CSV},
'func': {
'{scan ID}':
{
'scan': '{path to BOLD}',
'scan_parameters': {scan parameter dictionary}
}
},
'site_id': 'site-ID',
'subject_id': 'sub-01',
'unique_id': 'ses-1'}
'''
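    # Illustrative call (hypothetical paths, shown only for the shape of the
    # data_paths dict documented above):
    #     wf, rpool = initiate_rpool(wf, cfg, data_paths={
    #         'anat': '/bids/sub-01/ses-1/anat/sub-01_T1w.nii.gz',
    #         'creds_path': None,
    #         'func': {'rest_run-1': {
    #             'scan': '/bids/sub-01/ses-1/func/sub-01_bold.nii.gz',
    #             'scan_parameters': {}}},
    #         'site_id': 'site-1', 'subject_id': 'sub-01',
    #         'unique_id': 'ses-1'})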
# TODO: refactor further, integrate with the ingress_data functionality
# TODO: used for BIDS-Derivatives (below), and possible refactoring of
# TODO: the raw data config to use 'T1w' label instead of 'anat' etc.
if data_paths:
part_id = data_paths['subject_id']
ses_id = data_paths['unique_id']
if 'creds_path' not in data_paths:
creds_path = None
else:
creds_path = data_paths['creds_path']
unique_id = f'{part_id}_{ses_id}'
elif part_id:
unique_id = part_id
creds_path = None
rpool = ResourcePool(name=unique_id, cfg=cfg)
if data_paths:
rpool = ingress_raw_anat_data(wf, rpool, cfg, data_paths, unique_id,
part_id, ses_id)
wf, rpool, diff, blip, fmap_rp_list = \
ingress_raw_func_data(wf, rpool, cfg, data_paths, unique_id,
part_id, ses_id)
# grab already-processed data from the output directory
rpool = ingress_output_dir(cfg, rpool, unique_id, creds_path)
# grab any file paths from the pipeline config YAML
rpool = ingress_pipeconfig_paths(cfg, rpool, unique_id, creds_path)
return (wf, rpool)
def run_node_blocks(blocks, data_paths, cfg=None):
import os
from CPAC.pipeline import nipype_pipeline_engine as pe
from CPAC.utils.strategy import NodeBlock
    if not cfg:
        # A plain dict would break the attribute-style access used below
        # (cfg.pipeline_setup[...]); wrap the fallback in C-PAC's
        # Configuration helper, which exposes top-level keys as attributes.
        from CPAC.utils.configuration import Configuration
        cfg = Configuration({
            'pipeline_setup': {
                'working_directory': {
                    'path': os.getcwd()
                },
                'log_directory': {
                    'path': os.getcwd()
                }
            }
        })
# TODO: WE HAVE TO PARSE OVER UNIQUE ID'S!!!
    wf = pe.Workflow(name='node_blocks')
    # initiate_rpool() takes the workflow as its first argument and returns
    # (wf, rpool); build the workflow first, then unpack both return values.
    wf, rpool = initiate_rpool(wf, cfg, data_paths)
wf.base_dir = cfg.pipeline_setup['working_directory']['path']
wf.config['execution'] = {
'hash_method': 'timestamp',
'crashdump_dir': cfg.pipeline_setup['log_directory']['path']
}
run_blocks = []
if rpool.check_rpool('desc-preproc_T1w'):
print("Preprocessed T1w found, skipping anatomical preprocessing.")
else:
run_blocks += blocks[0]
if rpool.check_rpool('desc-preproc_bold'):
print("Preprocessed BOLD found, skipping functional preprocessing.")
else:
run_blocks += blocks[1]
for block in run_blocks:
wf = NodeBlock(block).connect_block(wf, cfg, rpool)
rpool.gather_pipes(wf, cfg)
wf.run()
| bsd-3-clause | -252,832,068,145,758,400 | 48.858065 | 382 | 0.515161 | false |
amrdraz/brython | www/tests/test_set.py | 9 | 1178 | x = set(['a','r','bg','Z'])
assert x==set(['bg','Z','a','r'])
assert len(x)==4
x.add('tail')
assert len(x)==5
x.add('tail')
assert len(x)==5
assert 'r' in x
assert 'rty' not in x
y = set([1,2,3])
assert x.isdisjoint(y)
y.add('r')
assert not x.isdisjoint(y)
z = set(['a','r'])
assert z.issubset(x)
assert z <= x
assert z < x
assert x.issuperset(z)
assert x >= z
assert x > z
assert not z.issubset(y)
assert z|y == {'a','r',1,2,3}
assert z.union(y) == {'a','r',1,2,3}
assert x&y=={'r'}
assert x.intersection(y)=={'r'}
assert x-y=={'a','bg','Z','tail'}
assert z^y == {'a',1,2,3}
x.remove('tail')
assert x=={'a','r','bg','Z'}
x.discard('azerty')
assert x=={'a','r','bg','Z'}
x.discard('a')
assert x=={'r','bg','Z'}
z.pop()
assert z=={'a'}
z.pop()
try:z.pop()
except KeyError:pass
x.clear()
assert len(x)==0
x = frozenset(['a','r','bg','Z'])
assert str(x).startswith("frozenset({")
assert x==set(['bg','Z','a','r'])
assert len(x)==4
try:
x.add('tail')
except AttributeError:
pass
assert len(x)==4
assert 'r' in x
assert 'rty' not in x
class foo(set):
def show(self):
return 'show'
x = foo([1,2])
assert x.show()=='show'
print("passed all tests..")
| bsd-3-clause | 6,604,613,360,345,172,000 | 15.361111 | 39 | 0.567912 | false |
cylc/cylc | cylc/flow/parsec/validate.py | 1 | 34282 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Validate a nested dict parsed from a config file against a spec file.
Check all items are legal.
Check all values are legal (type; min, max, allowed options).
Coerce value type from string (to int, float, list, etc.).
Coerce more value type from string (to time point, duration, xtriggers, etc.).
Also provides default values from the spec as a nested dict.
"""
import re
import shlex
from collections import deque
from textwrap import dedent
from metomi.isodatetime.data import Duration, TimePoint, Calendar
from metomi.isodatetime.dumpers import TimePointDumper
from metomi.isodatetime.parsers import TimePointParser, DurationParser
from metomi.isodatetime.exceptions import IsodatetimeError
from cylc.flow.parsec.exceptions import (
ListValueError, IllegalValueError, IllegalItemError)
from cylc.flow.subprocctx import SubFuncContext
class ParsecValidator:
"""Type validator and coercer for configurations.
Attributes:
.coercers (dict):
Map value type keys with coerce methods.
"""
# quoted value regex reference:
# http://stackoverflow.com/questions/5452655/
# python-regex-to-match-text-in-single-quotes-
# ignoring-escaped-quotes-and-tabs-n
# quoted list values not at end of line
_REC_SQ_L_VALUE = re.compile(r"'([^'\\]*(?:\\.[^'\\]*)*)'")
_REC_DQ_L_VALUE = re.compile(r'"([^"\\]*(?:\\.[^"\\]*)*)"')
# quoted values with ignored trailing comments
_REC_SQ_VALUE = re.compile(r"'([^'\\]*(?:\\.[^'\\]*)*)'(?:\s*(?:#.*)?)?$")
_REC_DQ_VALUE = re.compile(r'"([^"\\]*(?:\\.[^"\\]*)*)"(?:\s*(?:#.*)?)?$')
_REC_UQLP = re.compile(r"""(['"]?)(.*?)\1(,|$)""")
_REC_SQV = re.compile(r"((?:^[^']*(?:'[^']*')*[^']*)*)(#.*)$")
_REC_DQV = re.compile('((?:^[^"]*(?:"[^"]*")*[^"]*)*)(#.*)$')
# quoted multi-line values
_REC_MULTI_LINE_SINGLE = re.compile(
r"\A'''(.*?)'''\s*(?:#.*)?\Z", re.MULTILINE | re.DOTALL)
_REC_MULTI_LINE_DOUBLE = re.compile(
r'\A"""(.*?)"""\s*(?:#.*)?\Z', re.MULTILINE | re.DOTALL)
# integer range syntax START..END[..STEP]
_REC_INT_RANGE = re.compile(
r'\A([+\-]?\d+)\s*\.\.\s*([+\-]?\d+)(?:\s*\.\.\s*(\d+))?\Z')
# Parameterized names containing at least one comma.
_REC_MULTI_PARAM = re.compile(r'<[\w]+,.*?>')
SELF_REFERENCE_PATTERNS = ['localhost', '127.0.0.1', '0.0.0.0']
# Value type constants
V_BOOLEAN = 'V_BOOLEAN'
V_FLOAT = 'V_FLOAT'
V_FLOAT_LIST = 'V_FLOAT_LIST'
V_INTEGER = 'V_INTEGER'
V_INTEGER_LIST = 'V_INTEGER_LIST'
V_STRING = 'V_STRING'
V_STRING_LIST = 'V_STRING_LIST'
V_SPACELESS_STRING_LIST = 'V_SPACELESS_STRING_LIST'
V_ABSOLUTE_HOST_LIST = 'V_ABSOLUTE_HOST_LIST'
V_TYPE_HELP = {
# V_TYPE: (quick_name, help_string, examples_list, see_also)
V_BOOLEAN: (
'boolean',
'A boolean in Python format',
['True', 'False']
),
V_FLOAT: (
'float',
'A number in integer, decimal or exponential format',
['1', '1.1', '1.1e11']
),
V_FLOAT_LIST: (
'float list',
'A comma separated list of floats.',
['1, 1.1, 1.1e11']
),
V_INTEGER: (
'integer',
'An integer.',
['1', '2', '3']
),
V_INTEGER_LIST: (
'integer list',
'A comma separated list of integers.',
['1, 2, 3']
),
V_STRING: (
'string',
'Plain text.',
['Hello World!']
),
V_STRING_LIST: (
'list',
'A comma separated list of strings.',
['a, b c, d']
),
V_SPACELESS_STRING_LIST: (
'spaceless list',
'A comma separated list of strings which cannot contain spaces.',
['a, b, c']
),
V_ABSOLUTE_HOST_LIST: (
'absolute host list',
'A comma separated list of hostnames which does not contain '
'any self references '
f'(i.e. does not contain {", ".join(SELF_REFERENCE_PATTERNS)})',
['foo', 'bar', 'baz']
)
}
def __init__(self):
self.coercers = {
self.V_BOOLEAN: self.coerce_boolean,
self.V_FLOAT: self.coerce_float,
self.V_FLOAT_LIST: self.coerce_float_list,
self.V_INTEGER: self.coerce_int,
self.V_INTEGER_LIST: self.coerce_int_list,
self.V_STRING: self.coerce_str,
self.V_STRING_LIST: self.coerce_str_list,
self.V_SPACELESS_STRING_LIST: self.coerce_spaceless_str_list,
self.V_ABSOLUTE_HOST_LIST: self.coerce_absolute_host_list
}
def validate(self, cfg_root, spec_root):
"""Validate and coerce a nested dict against a parsec spec.
Args:
cfg_root (dict):
A nested dict representing the raw configuration.
spec_root (dict):
A nested dict containing the spec for the configuration.
Raises:
IllegalItemError: on bad configuration items.
IllegalValueError: on bad configuration values.
"""
queue = deque([[cfg_root, spec_root, []]])
while queue:
# Walk items, breadth first
cfg, spec, keys = queue.popleft()
for key, value in cfg.items():
if key not in spec:
if '__MANY__' not in spec:
raise IllegalItemError(keys, key)
else:
# only accept the item if its value is of the same type
# as that of the __MANY__ item, i.e. dict or not-dict.
val_is_dict = isinstance(value, dict)
spc_is_dict = not spec['__MANY__'].is_leaf()
if (
keys != ['scheduling', 'graph'] and
not val_is_dict and
' ' in key
):
# Item names shouldn't have consecutive spaces
# (GitHub #2417)
raise IllegalItemError(
keys, key, 'consecutive spaces')
if ((val_is_dict and spc_is_dict) or
(not val_is_dict and not spc_is_dict)):
speckey = '__MANY__'
else:
raise IllegalItemError(keys, key)
else:
speckey = key
specval = spec[speckey]
if isinstance(value, dict) and not specval.is_leaf():
# Item is dict, push to queue
queue.append([value, specval, keys + [key]])
elif value is not None and specval.is_leaf():
# Item is value, coerce according to value type
cfg[key] = self.coercers[specval.vdr](value, keys + [key])
if specval.options:
voptions = specval.options
if (isinstance(cfg[key], list) and
any(val not in voptions for val in cfg[key]) or
not isinstance(cfg[key], list) and
cfg[key] not in voptions):
raise IllegalValueError(
'option', keys + [key], cfg[key])
__call__ = validate
@classmethod
def coerce_boolean(cls, value, keys):
"""Coerce value to a boolean.
Examples:
>>> ParsecValidator.coerce_boolean('True', None)
True
>>> ParsecValidator.coerce_boolean('true', None)
True
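            >>> ParsecValidator.coerce_boolean('false', None)
            False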
"""
value = cls.strip_and_unquote(keys, value)
if value in ['True', 'true']:
return True
elif value in ['False', 'false']:
return False
elif value in ['', None]:
return None
else:
raise IllegalValueError('boolean', keys, value)
@classmethod
def coerce_float(cls, value, keys):
"""Coerce value to a float.
Examples:
>>> ParsecValidator.coerce_float('1', None)
1.0
>>> ParsecValidator.coerce_float('1.1', None)
1.1
>>> ParsecValidator.coerce_float('1.1e1', None)
11.0
"""
value = cls.strip_and_unquote(keys, value)
if value in ['', None]:
return None
try:
return float(value)
except ValueError:
raise IllegalValueError('float', keys, value)
@classmethod
def coerce_float_list(cls, value, keys):
"""Coerce list values with optional multipliers to float.
Examples:
>>> ParsecValidator.coerce_float_list('1, 1.1, 1.1e1', None)
[1.0, 1.1, 11.0]
"""
values = cls.strip_and_unquote_list(keys, value)
return cls.expand_list(values, keys, float)
@classmethod
def coerce_int(cls, value, keys):
"""Coerce value to an integer.
Examples:
>>> ParsecValidator.coerce_int('1', None)
1
"""
value = cls.strip_and_unquote(keys, value)
if value in ['', None]:
return None
try:
return int(value)
except ValueError:
raise IllegalValueError('int', keys, value)
@classmethod
def coerce_int_list(cls, value, keys):
"""Coerce list values with optional multipliers to integer.
Examples:
>>> ParsecValidator.coerce_int_list('1, 2, 3', None)
[1, 2, 3]
"""
items = []
for item in cls.strip_and_unquote_list(keys, value):
values = cls.parse_int_range(item)
if values is None:
items.extend(cls.expand_list([item], keys, int))
else:
items.extend(values)
return items
@classmethod
def coerce_str(cls, value, keys):
"""Coerce value to a string.
Examples:
>>> ParsecValidator.coerce_str('abc', None)
'abc'
>>> ParsecValidator.coerce_str(['abc', 'def'], None)
'abc\\ndef'
"""
if isinstance(value, list):
# handle graph string merging
vraw = []
vals = [value]
while vals:
val = vals.pop()
if isinstance(val, list):
vals.extend(reversed(val)) # reverse to preserve order
else:
vraw.append(cls.strip_and_unquote(keys, val))
value = '\n'.join(vraw)
else:
value = cls.strip_and_unquote(keys, value)
return value
@classmethod
def coerce_str_list(cls, value, keys):
"""Coerce value to a list of strings.
>>> ParsecValidator.coerce_str_list('a, b, c', None)
['a', 'b', 'c']
>>> ParsecValidator.coerce_str_list('a, b c , d', None)
['a', 'b c', 'd']
"""
return cls.strip_and_unquote_list(keys, value)
@classmethod
def coerce_spaceless_str_list(cls, value, keys):
"""Coerce value to a list of strings ensuring no values contain spaces.
Examples:
>>> ParsecValidator.coerce_spaceless_str_list(
... 'a, b, c', None)
['a', 'b', 'c']
>>> ParsecValidator.coerce_spaceless_str_list(
... 'a, b c, d', ['foo']) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
cylc.flow.parsec.exceptions.ListValueError: \
(type=list) foo = a, b c, d - \
(list item "b c" cannot contain a space character)
"""
lst = cls.strip_and_unquote_list(keys, value)
for item in lst:
if ' ' in item:
raise ListValueError(
keys, value,
msg='list item "%s" cannot contain a space character' %
item)
return lst
@classmethod
def coerce_absolute_host_list(cls, value, keys):
"""Do not permit self reference in host names.
Examples:
>>> ParsecValidator.coerce_absolute_host_list(
... 'foo, bar, baz', None)
['foo', 'bar', 'baz']
>>> ParsecValidator.coerce_absolute_host_list(
... 'foo, bar, 127.0.0.1:8080, baz', ['pub']
... ) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
cylc.flow.parsec.exceptions.ListValueError: \
(type=list) pub = foo, bar, 127.0.0.1:8080, baz - \
(ambiguous host "127.0.0.1:8080")
"""
hosts = cls.coerce_spaceless_str_list(value, keys)
for host in hosts:
if any(host.startswith(pattern)
for pattern in cls.SELF_REFERENCE_PATTERNS):
raise ListValueError(
keys, value, msg='ambiguous host "%s"' % host)
return hosts
@classmethod
def expand_list(cls, values, keys, type_):
"""Handle multiplier syntax N*VALUE in a list.
Examples:
>>> ParsecValidator.expand_list(['1', '2*3'], None, int)
[1, 3, 3]
"""
lvalues = []
for item in values:
try:
mult, val = item.split('*', 1)
except ValueError:
# too few values to unpack: no multiplier
try:
lvalues.append(type_(item))
except ValueError as exc:
raise IllegalValueError('list', keys, item, exc=exc)
else:
# mult * val
try:
lvalues += int(mult) * [type_(val)]
except ValueError as exc:
raise IllegalValueError('list', keys, item, exc=exc)
return lvalues
@classmethod
def parse_int_range(cls, value):
"""Parse a value containing an integer range START..END[..STEP].
Return (list):
A list containing the integer values in range,
or None if value does not contain an integer range.
Examples:
>>> ParsecValidator.parse_int_range('1..3')
[1, 2, 3]
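            >>> ParsecValidator.parse_int_range('1..5..2')
            [1, 3, 5]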
"""
match = cls._REC_INT_RANGE.match(value)
if match:
lower, upper, step = match.groups()
if not step:
step = 1
return list(range(int(lower), int(upper) + 1, int(step)))
else:
return None
@classmethod
def strip_and_unquote(cls, keys, value):
"""Remove leading and trailing spaces and unquote value.
Args:
keys (list):
Keys in nested dict that represents the raw configuration.
value (str):
String value in raw configuration.
Return (str):
Processed value.
Examples:
>>> ParsecValidator.strip_and_unquote(None, '" foo "')
'foo'
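            >>> ParsecValidator.strip_and_unquote(None, 'foo  # comment')
            'foo'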
"""
for substr, rec in [
["'''", cls._REC_MULTI_LINE_SINGLE],
['"""', cls._REC_MULTI_LINE_DOUBLE],
['"', cls._REC_DQ_VALUE],
["'", cls._REC_SQ_VALUE]]:
if value.startswith(substr):
match = rec.match(value)
if match:
value = match.groups()[0]
else:
raise IllegalValueError("string", keys, value)
break
else:
# unquoted
value = value.split(r'#', 1)[0]
# Note strip() removes leading and trailing whitespace, including
# initial newlines on a multiline string:
return dedent(value).strip()
@classmethod
def strip_and_unquote_list(cls, keys, value):
"""Remove leading and trailing spaces and unquote list value.
Args:
keys (list):
Keys in nested dict that represents the raw configuration.
value (str):
String value in raw configuration that is supposed to be a
comma separated list.
Return (list):
Processed value as a list.
Examples:
>>> ParsecValidator.strip_and_unquote_list(None, ' 1 , "2", 3')
['1', '"2"', '3']
>>> ParsecValidator.strip_and_unquote_list(None, '" 1 , 2", 3')
['1 , 2', '3']
"""
if value.startswith('"') or value.startswith("'"):
lexer = shlex.shlex(value, posix=True, punctuation_chars=",")
lexer.commenters = '#'
lexer.whitespace_split = False
lexer.whitespace = "\t\n\r"
lexer.wordchars += " "
values = [t.strip() for t in lexer if t != "," and t.strip()]
else:
# unquoted values (may contain internal quoted strings with list
# delimiters inside 'em!)
for quotation, rec in (('"', cls._REC_DQV), ("'", cls._REC_SQV)):
if quotation in value:
match = rec.match(value)
if match:
value = match.groups()[0]
break
else:
value = value.split(r'#', 1)[0].strip()
values = list(cls._unquoted_list_parse(keys, value))
# allow trailing commas
if values[-1] == '':
values = values[0:-1]
return values
@classmethod
def _unquoted_list_parse(cls, keys, value):
"""Split comma separated list, and unquote each value.
Examples:
>>> list(ParsecValidator._unquoted_list_parse(None, '"1", 2'))
['1', '2']
"""
# http://stackoverflow.com/questions/4982531/
# how-do-i-split-a-comma-delimited-string-in-python-except-
# for-the-commas-that-are
# First detect multi-parameter lists like <m,n>.
if cls._REC_MULTI_PARAM.search(value):
raise ListValueError(
keys, value,
msg="names containing commas must be quoted"
"(e.g. 'foo<m,n>')")
pos = 0
while True:
match = cls._REC_UQLP.search(value, pos)
result = match.group(2).strip()
separator = match.group(3)
yield result
if not separator:
break
pos = match.end(0)
def parsec_validate(cfg_root, spec_root):
"""Short for "ParsecValidator().validate(...)"."""
return ParsecValidator().validate(cfg_root, spec_root)
class DurationFloat(float):
"""Duration in floating point seconds, but stringify as ISO8601 format."""
def __str__(self):
return str(Duration(seconds=self, standardize=True))
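
# Illustrative (assuming isodatetime's standardize behaviour):
#     str(DurationFloat(3600)) == 'PT1H'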
class CylcConfigValidator(ParsecValidator):
"""Type validator and coercer for Cylc configurations.
Attributes:
.coercers (dict):
Map value type keys with coerce methods.
"""
    # Valid parameter values / name suffixes: word characters plus -, +, %, @.
_REC_NAME_SUFFIX = re.compile(r'\A[\w\-+%@]+\Z')
_REC_TRIG_FUNC = re.compile(r'(\w+)\((.*)\)(?::(\w+))?')
# Value type constants
V_CYCLE_POINT = 'V_CYCLE_POINT'
V_CYCLE_POINT_FORMAT = 'V_CYCLE_POINT_FORMAT'
V_CYCLE_POINT_TIME_ZONE = 'V_CYCLE_POINT_TIME_ZONE'
V_INTERVAL = 'V_INTERVAL'
V_INTERVAL_LIST = 'V_INTERVAL_LIST'
V_PARAMETER_LIST = 'V_PARAMETER_LIST'
V_XTRIGGER = 'V_XTRIGGER'
V_TYPE_HELP = {
# V_TYPE: (quick_name, help_string, examples_list, see_also)
V_CYCLE_POINT: (
'cycle point',
'An integer or date-time cycle point as appropriate.',
{
'1': 'An integer cycle point.',
'2000-01-01T00:00Z': 'A date-time cycle point.',
'now': 'The current date-time.',
'next(T-00)':
'The current date-time rounded up to the nearest'
' whole hour.'
},
[
('std:term', 'cycle point'),
('std:term', 'ISO8601 duration')
]
),
V_CYCLE_POINT_FORMAT: (
'cycle point format',
            'A time format for date-time cycle points in ``isodatetime`` '
'"print" or "parse" format. '
'See ``isodatetime --help`` for more information.',
{
'CCYYMM': '``isodatetime`` print format.',
'%Y%m': '``isodatetime`` parse format.'
}
),
V_CYCLE_POINT_TIME_ZONE: (
'cycle point time zone',
'A time zone for date-time cycle points in ISO8601 format.',
{
'Z': 'UTC / GMT.',
'+13': 'UTC plus 13 hours.',
'-0830': 'UTC minus 8 hours and 30 minutes.'
}
),
V_INTERVAL: (
'time interval',
'An ISO8601 duration.',
{
'P1Y': 'Every year.',
'PT6H': 'Every six hours.'
},
[('std:term', 'ISO8601 duration')]
),
V_INTERVAL_LIST: (
'time interval list',
'A comma separated list of time intervals',
['P1Y, P2Y, P3Y'],
[('std:term', 'ISO8601 duration')]
),
V_PARAMETER_LIST: (
'parameter list',
'A comma separated list of Cylc parameter values. '
'This can include strings, integers and integer ranges.',
{
'foo, bar, baz': 'List of string parameters.',
'1, 2, 3': 'List of integer parameters.',
'1..3': 'The same as 1, 2, 3.',
'1..5..2': 'The same as 1, 3, 5.',
'1..5..2, 8': 'Range and integers can be mixed.',
},
[('ref', 'Parameterized Tasks Label')]
),
V_XTRIGGER: (
'xtrigger function signature',
'A function signature similar to how it would be written in '
'Python.\n'
'``<function>(<arg>, <kwarg>=<value>):<interval>``',
{
'mytrigger(42, cycle_point=%(point)):PT10S':
'Run function ``mytrigger`` every 10 seconds.'
},
[('ref', 'Section External Triggers')]
)
}
def __init__(self):
ParsecValidator.__init__(self)
self.coercers.update({
self.V_CYCLE_POINT: self.coerce_cycle_point,
self.V_CYCLE_POINT_FORMAT: self.coerce_cycle_point_format,
self.V_CYCLE_POINT_TIME_ZONE: self.coerce_cycle_point_time_zone,
self.V_INTERVAL: self.coerce_interval,
self.V_INTERVAL_LIST: self.coerce_interval_list,
self.V_PARAMETER_LIST: self.coerce_parameter_list,
self.V_XTRIGGER: self.coerce_xtrigger,
})
@classmethod
def coerce_cycle_point(cls, value, keys):
"""Coerce value to a cycle point.
Examples:
>>> CylcConfigValidator.coerce_cycle_point('2000', None)
'2000'
>>> CylcConfigValidator.coerce_cycle_point('now', None)
'now'
>>> CylcConfigValidator.coerce_cycle_point('next(T-00)', None)
'next(T-00)'
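            >>> CylcConfigValidator.coerce_cycle_point(
            ...     '2000-01-01T00:00Z', None)
            '2000-01-01T00:00Z'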
"""
if not value:
return None
value = cls.strip_and_unquote(keys, value)
if value == 'now':
# Handle this later in config.py when the suite UTC mode is known.
return value
if "next" in value or "previous" in value:
# Handle this later, as for "now".
return value
if value.isdigit():
# Could be an old date-time cycle point format, or integer format.
return value
if "P" not in value and (
value.startswith('-') or value.startswith('+')):
# We don't know the value given for num expanded year digits...
for i in range(1, 101):
try:
TimePointParser(num_expanded_year_digits=i).parse(value)
except IsodatetimeError:
continue
return value
raise IllegalValueError('cycle point', keys, value)
if "P" in value:
# ICP is an offset
parser = DurationParser()
try:
if value.startswith("-"):
# parser doesn't allow negative duration with this setup?
parser.parse(value[1:])
else:
parser.parse(value)
return value
except IsodatetimeError:
raise IllegalValueError("cycle point", keys, value)
try:
TimePointParser().parse(value)
except IsodatetimeError:
raise IllegalValueError('cycle point', keys, value)
return value
@classmethod
def coerce_cycle_point_format(cls, value, keys):
"""Coerce to a cycle point format.
Examples:
>>> CylcConfigValidator.coerce_cycle_point_format(
... 'CCYYMM', None)
'CCYYMM'
>>> CylcConfigValidator.coerce_cycle_point_format(
... '%Y%m', None)
'%Y%m'
"""
value = cls.strip_and_unquote(keys, value)
if not value:
return None
test_timepoint = TimePoint(year=2001, month_of_year=3, day_of_month=1,
hour_of_day=4, minute_of_hour=30,
second_of_minute=54)
if '/' in value:
raise IllegalValueError('cycle point format', keys, value)
if '%' in value:
try:
TimePointDumper().strftime(test_timepoint, value)
except IsodatetimeError:
raise IllegalValueError('cycle point format', keys, value)
return value
if 'X' in value:
for i in range(1, 101):
dumper = TimePointDumper(num_expanded_year_digits=i)
try:
dumper.dump(test_timepoint, value)
except IsodatetimeError:
continue
return value
raise IllegalValueError('cycle point format', keys, value)
dumper = TimePointDumper()
try:
dumper.dump(test_timepoint, value)
except IsodatetimeError:
raise IllegalValueError('cycle point format', keys, value)
return value
@classmethod
def coerce_cycle_point_time_zone(cls, value, keys):
"""Coerce value to a cycle point time zone format.
Examples:
>>> CylcConfigValidator.coerce_cycle_point_time_zone(
... 'Z', None)
'Z'
>>> CylcConfigValidator.coerce_cycle_point_time_zone(
... '+13', None)
'+13'
>>> CylcConfigValidator.coerce_cycle_point_time_zone(
... '-0800', None)
'-0800'
"""
value = cls.strip_and_unquote(keys, value)
if not value:
return None
test_timepoint = TimePoint(year=2001, month_of_year=3, day_of_month=1,
hour_of_day=4, minute_of_hour=30,
second_of_minute=54)
dumper = TimePointDumper()
test_timepoint_string = dumper.dump(test_timepoint, 'CCYYMMDDThhmmss')
test_timepoint_string += value
parser = TimePointParser(allow_only_basic=True)
try:
parser.parse(test_timepoint_string)
except ValueError: # not IsodatetimeError as too specific
raise IllegalValueError(
'cycle point time zone format', keys, value)
return value
@classmethod
def coerce_interval(cls, value, keys):
"""Coerce an ISO 8601 interval into seconds.
Examples:
>>> CylcConfigValidator.coerce_interval('PT1H', None)
3600.0
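            >>> CylcConfigValidator.coerce_interval('PT1M', None)
            60.0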
"""
value = cls.strip_and_unquote(keys, value)
if not value:
# Allow explicit empty values.
return None
try:
interval = DurationParser().parse(value)
except IsodatetimeError:
raise IllegalValueError("ISO 8601 interval", keys, value)
days, seconds = interval.get_days_and_seconds()
return DurationFloat(
days * Calendar.default().SECONDS_IN_DAY + seconds)
@classmethod
def coerce_interval_list(cls, value, keys):
"""Coerce a list of intervals into seconds.
Examples:
>>> CylcConfigValidator.coerce_interval_list('PT1H, PT2H', None)
[3600.0, 7200.0]
"""
return cls.expand_list(
cls.strip_and_unquote_list(keys, value),
keys,
lambda v: cls.coerce_interval(v, keys))
@classmethod
def coerce_parameter_list(cls, value, keys):
"""Coerce parameter list.
Args:
value (str):
This can be a list of str values. Each str value must conform
to the same restriction as a task name.
Otherwise, this can be a mixture of int ranges and int values.
keys (list):
Keys in nested dict that represents the raw configuration.
Return (list):
A list of strings or a list of sorted integers.
Raise:
IllegalValueError:
If value has both str and int range or if a str value breaks
the task name restriction.
Examples:
>>> CylcConfigValidator.coerce_parameter_list('1..4, 6', None)
[1, 2, 3, 4, 6]
>>> CylcConfigValidator.coerce_parameter_list('a, b, c', None)
['a', 'b', 'c']
"""
items = []
can_only_be = None # A flag to prevent mixing str and int range
for item in cls.strip_and_unquote_list(keys, value):
values = cls.parse_int_range(item)
if values is not None:
if can_only_be == str:
raise IllegalValueError(
'parameter', keys, value, 'mixing int range and str')
can_only_be = int
items.extend(values)
elif cls._REC_NAME_SUFFIX.match(item):
try:
int(item)
except ValueError:
if can_only_be == int:
raise IllegalValueError(
'parameter', keys, value,
'mixing int range and str')
can_only_be = str
items.append(item)
else:
raise IllegalValueError(
'parameter', keys, value, '%s: bad value' % item)
try:
return [int(item) for item in items]
except ValueError:
return items
@classmethod
def coerce_xtrigger(cls, value, keys):
"""Coerce a string into an xtrigger function context object.
func_name(*func_args, **func_kwargs)
Checks for legal string templates in arg values too.
Examples:
>>> CylcConfigValidator.coerce_xtrigger('a(b, c):PT1M', [None])
a(b, c):60.0
"""
label = keys[-1]
value = cls.strip_and_unquote(keys, value)
if not value:
raise IllegalValueError("xtrigger", keys, value)
fname = None
args = []
kwargs = {}
match = cls._REC_TRIG_FUNC.match(value)
if match is None:
raise IllegalValueError("xtrigger", keys, value)
fname, fargs, intvl = match.groups()
if intvl:
intvl = cls.coerce_interval(intvl, keys)
if fargs:
# Extract function args and kwargs.
for farg in fargs.split(r','):
try:
key, val = farg.strip().split(r'=', 1)
except ValueError:
args.append(cls._coerce_type(farg.strip()))
else:
kwargs[key.strip()] = cls._coerce_type(val.strip())
return SubFuncContext(label, fname, args, kwargs, intvl)
@classmethod
def _coerce_type(cls, value):
"""Convert value to int, float, or bool, if possible.
Examples:
>>> CylcConfigValidator._coerce_type('1')
1
>>> CylcConfigValidator._coerce_type('1.1')
1.1
>>> CylcConfigValidator._coerce_type('True')
True
>>> CylcConfigValidator._coerce_type('abc')
'abc'
"""
try:
val = int(value)
except ValueError:
try:
val = float(value)
except ValueError:
if value == 'False':
val = False
elif value == 'True':
val = True
else:
# Leave as string.
val = cls.strip_and_unquote([], value)
return val
def cylc_config_validate(cfg_root, spec_root):
"""Short for "CylcConfigValidator().validate(...)"."""
return CylcConfigValidator().validate(cfg_root, spec_root)
| gpl-3.0 | -5,937,824,959,578,395,000 | 34.451913 | 79 | 0.511872 | false |
eloquentstore/appimager | cli/install.py | 1 | 1599 | from cli import base
from core import data, container
import shutil
import os
import sys
import tarfile
from docker import Client
from urllib.request import urlretrieve
from cement.core.controller import CementBaseController, expose
class InstallController(CementBaseController):
class Meta:
label = 'install'
stacked_on = 'base'
@expose(help='Installs dependencies from an AppImage.yml file.')
def install(self):
data_obj = data.Data()
build_path = data_obj.get_build_path()
docker = Client()
if not os.path.exists(build_path):
print("Creating build directory")
os.mkdir(build_path)
container_name = data_obj.get_path_hash()
container_obj = container.Container(container_name)
print("Downloading app dependencies...")
deps = ""
for dep in data_obj.get_deps():
deps = deps + " " + dep
for line in container_obj.execute('rm -rf /tmp/debs && mkdir /tmp/debs && cd /tmp/debs && apt-get download ' + deps):
print(line, end="")
print('Decompressing dependencies...')
for line in container_obj.execute('ls -1 /tmp/debs | while read line ; do dpkg-deb -R /tmp/debs/$line /mnt/appimager/build ; done'):
print(line)
print('Configuring permissions...')
container_obj.execute('chown -R ' + str(os.getuid()) + ':' + str(os.getgid()) + ' /mnt/appimager/build')
shutil.rmtree('build/DEBIAN')
print('Writing lock file...')
data_obj.write_lock_file()
print("Complete")
| mit | -7,275,915,682,717,822,000 | 29.75 | 140 | 0.621639 | false |
ESOedX/edx-platform | common/test/acceptance/pages/lms/library.py | 2 | 1513 | """
Library Content XBlock Wrapper
"""
from __future__ import absolute_import
from bok_choy.page_object import PageObject
class LibraryContentXBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around a LibraryContent block seen in the LMS
"""
url = None
BODY_SELECTOR = '.xblock-student_view div'
def __init__(self, browser, locator):
super(LibraryContentXBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
"""
Checks if page is opened
"""
return self.q(css='{}[data-id="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular block's context
"""
return u'{}[data-id="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
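
    # e.g. _bounded_selector('div[data-id]') yields
    # '.xblock-student_view div[data-id="<locator>"] div[data-id]'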
@property
def children_contents(self):
"""
Gets contents of all child XBlocks as list of strings
"""
child_blocks = self.q(css=self._bounded_selector("div[data-id]"))
return frozenset(child.text for child in child_blocks)
@property
def children_headers(self):
"""
Gets headers of all child XBlocks as list of strings
"""
child_blocks_headers = self.q(css=self._bounded_selector("div[data-id] .problem-header"))
return frozenset(child.text for child in child_blocks_headers)
| agpl-3.0 | -250,246,660,050,396,380 | 29.26 | 97 | 0.615995 | false |
fieraloca/CODEPROJ | PYTHON/COMP_PHOTO/hw0/part0.py | 1 | 3516 | import sys
import os
import numpy as np
import cv2
def split_rgb(image):
'''Split the target image into its red, green and blue channels.
image - a numpy array of shape (rows, columns, 3).
output - three numpy arrays of shape (rows, columns) and dtype same as
image, containing the corresponding channels.
Please make sure the output shape has only 2 components!
For instance, (600, 800) instead of (600, 800, 1)
'''
red = None
green = None
blue = None
# Insert your code here.----------------------------------------------------
blue = image[:,:,0]
green = image[:,:,1]
red = image[:,:,2]
#---------------------------------------------------------------------------
return red, green, blue
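
# For example: passing a (rows, cols, 3) image loaded with cv2.imread (BGR
# channel order) returns red == image[:,:,2], green == image[:,:,1] and
# blue == image[:,:,0], each of shape (rows, cols).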
def main():
''' This function applies your split script to images.
It will search through the images/part0 subfolder, and apply your splitting
function to each one. It will then save the resulting images.
'''
imagesfolder0 = os.path.abspath(os.path.join(os.curdir, 'images', 'part0'))
print '''Searching for images in {} folder
(will ignore red, green, or blue in the name)'''.format(imagesfolder0)
exts = ['.bmp', '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.jpeg', '.jpg',
'.jpe', '.jp2', '.tiff', '.tif', '.png']
for dirname, dirnames, filenames in os.walk(imagesfolder0):
for filename in filenames:
name, ext = os.path.splitext(filename)
if ext in exts and 'red' not in name and 'green' not in name and \
'blue' not in name:
print "Splitting image {}.".format(filename)
img = cv2.imread(os.path.join(dirname, filename))
red, green, blue = split_rgb(img)
for values, color, channel in zip((red, green, blue),
('red', 'green', 'blue'), (2,1,0)):
img = np.zeros((values.shape[0], values.shape[1], 3),
dtype = values.dtype)
img[:,:,channel] = values
print "Writing image {}.".format(name+color+ext)
cv2.imwrite(os.path.join(dirname, name+color+ext), img)
def test():
'''This script will perform a unit test on your function, and provide useful
output.
'''
x = (np.random.rand(4,4,3) * 255).astype(np.uint8)
if __name__ == "__main__":
print "Input:\n{}".format(x)
usr_red, usr_green, usr_blue = split_rgb(x)
true_red = x[:,:,2]
true_green = x[:,:,1]
true_blue = x[:,:,0]
for usr_out, true_out, name in zip((usr_red, usr_green, usr_blue),
(true_red, true_green, true_blue), ('red', 'green', 'blue')):
    if usr_out is None:
if __name__ == "__main__":
print "Error- {} has value None.".format(name)
return False
if not usr_out.shape == true_out.shape:
if __name__ == "__main__":
print "Error- {} has shape {}. Expected shape is {}.".format(name,
usr_out.shape, true_out.shape)
return False
if not usr_out.dtype == true_out.dtype:
if __name__ == "__main__":
print "Error- {} has dtype {}. Expected dtype is {}.".format(name,
usr_out.dtype, true_out.dtype)
return False
if not np.all(usr_out == true_out):
if __name__ == "__main__":
print "Error- {} has value:\n{}\nExpected value:\n{}".format(name,
usr_out, true_out)
return False
if __name__ == "__main__":
print "Success - all outputs correct."
return True
if __name__ == "__main__":
# Testing code
print "Performing unit test."
t = test()
print "Unit test: {}".format(t)
if t:
main()
| mit | 9,172,736,779,900,084,000 | 31.555556 | 78 | 0.565984 | false |
popazerty/EG-2 | lib/python/Screens/InfoBarGenerics.py | 4 | 131511 | # -*- coding: utf-8 -*-
from Components.ActionMap import ActionMap, HelpableActionMap, NumberActionMap
from Components.Harddisk import harddiskmanager, findMountPoint
from Components.Input import Input
from Components.Label import Label
from Components.MovieList import AUDIO_EXTENSIONS, MOVIE_EXTENSIONS, DVD_EXTENSIONS
from Components.PluginComponent import plugins
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Sources.Boolean import Boolean
from Components.config import config, configfile, ConfigBoolean, ConfigClock
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredInstantRecordPath, defaultMoviePath, preferredTimerPath, ConfigSelection
# from Components.Task import Task, Job, job_manager as JobManager
from Components.Pixmap import MovingPixmap, MultiPixmap
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Plugins.Plugin import PluginDescriptor
from Components.Timeshift import InfoBarTimeshift
from Screens.Screen import Screen
from Screens import ScreenSaver
from Screens.ChannelSelection import ChannelSelection, PiPZapSelection, BouquetSelector, EpgBouquetSelector
from Screens.ChoiceBox import ChoiceBox
from Screens.Dish import Dish
from Screens.EventView import EventViewEPGSelect, EventViewSimple
from Screens.EpgSelection import EPGSelection
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.MinuteInput import MinuteInput
from Screens.TimerSelection import TimerSelection
from Screens.PictureInPicture import PictureInPicture
from Screens.PVRState import PVRState, TimeshiftState
from Screens.SubtitleDisplay import SubtitleDisplay
from Screens.RdsDisplay import RdsInfoDisplay, RassInteractive
from Screens.TimeDateInput import TimeDateInput
from Screens.TimerEdit import TimerEditList
from Screens.UnhandledKey import UnhandledKey
from ServiceReference import ServiceReference, isPlayableForCur
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT, findSafeRecordPath
from Screens.TimerEntry import TimerEntry as TimerEntry
from Tools import Notifications
from Tools.Directories import pathExists, fileExists
from Tools.KeyBindings import getKeyDescription
from enigma import eTimer, eServiceCenter, eDVBServicePMTHandler, iServiceInformation, iPlayableService, eServiceReference, eEPGCache, eActionMap
from boxbranding import getBrandOEM, getMachineBuild
from time import time, localtime, strftime
from bisect import insort
from sys import maxint
import os, cPickle
# hack alert!
from Screens.Menu import MainMenu, Menu, mdom
from Screens.Setup import Setup
import Screens.Standby
def isStandardInfoBar(self):
return self.__class__.__name__ == "InfoBar"
def isMoviePlayerInfoBar(self):
return self.__class__.__name__ == "MoviePlayer"
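
# The resume point cache maps a service reference string to
# [lru_timestamp, play_position, length], with position/length in the PTS
# units reported by the service's seek interface.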
def setResumePoint(session):
global resumePointCache, resumePointCacheLast
service = session.nav.getCurrentService()
ref = session.nav.getCurrentlyPlayingServiceOrGroup()
if (service is not None) and (ref is not None): # and (ref.type != 1):
# ref type 1 has its own memory...
seek = service.seek()
if seek:
pos = seek.getPlayPosition()
if not pos[0]:
key = ref.toString()
lru = int(time())
l = seek.getLength()
if l:
l = l[1]
else:
l = None
resumePointCache[key] = [lru, pos[1], l]
for k, v in resumePointCache.items():
if v[0] < lru:
candidate = k
filepath = os.path.realpath(candidate.split(':')[-1])
mountpoint = findMountPoint(filepath)
if os.path.ismount(mountpoint) and not os.path.exists(filepath):
del resumePointCache[candidate]
saveResumePoints()
def delResumePoint(ref):
global resumePointCache, resumePointCacheLast
try:
del resumePointCache[ref.toString()]
except KeyError:
pass
saveResumePoints()
def getResumePoint(session):
global resumePointCache
ref = session.nav.getCurrentlyPlayingServiceOrGroup()
if (ref is not None) and (ref.type != 1):
try:
entry = resumePointCache[ref.toString()]
entry[0] = int(time()) # update LRU timestamp
return entry[1]
except KeyError:
return None
def saveResumePoints():
global resumePointCache, resumePointCacheLast
try:
f = open('/etc/enigma2/resumepoints.pkl', 'wb')
cPickle.dump(resumePointCache, f, cPickle.HIGHEST_PROTOCOL)
f.close()
except Exception, ex:
print "[InfoBar] Failed to write resumepoints:", ex
resumePointCacheLast = int(time())
def loadResumePoints():
try:
file = open('/etc/enigma2/resumepoints.pkl', 'rb')
PickleFile = cPickle.load(file)
file.close()
return PickleFile
except Exception, ex:
print "[InfoBar] Failed to load resumepoints:", ex
return {}
def updateresumePointCache():
global resumePointCache
resumePointCache = loadResumePoints()
resumePointCache = loadResumePoints()
resumePointCacheLast = int(time())
class InfoBarDish:
def __init__(self):
self.dishDialog = self.session.instantiateDialog(Dish)
class InfoBarLongKeyDetection:
def __init__(self):
eActionMap.getInstance().bindAction('', -maxint -1, self.detection) #highest prio
self.LongButtonPressed = False
#this function is called on every keypress!
def detection(self, key, flag):
if flag == 3:
self.LongButtonPressed = True
elif flag == 0:
self.LongButtonPressed = False
class InfoBarUnhandledKey:
def __init__(self):
self.unhandledKeyDialog = self.session.instantiateDialog(UnhandledKey)
self.hideUnhandledKeySymbolTimer = eTimer()
self.hideUnhandledKeySymbolTimer.callback.append(self.unhandledKeyDialog.hide)
self.checkUnusedTimer = eTimer()
self.checkUnusedTimer.callback.append(self.checkUnused)
self.onLayoutFinish.append(self.unhandledKeyDialog.hide)
eActionMap.getInstance().bindAction('', -maxint -1, self.actionA) #highest prio
eActionMap.getInstance().bindAction('', maxint, self.actionB) #lowest prio
self.flags = (1<<1)
self.uflags = 0
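		# 'flags' accumulates one bit per key-event flag (make/break/repeat/
		# long) seen in actionA(); 'uflags' accumulates the flags that reached
		# actionB() unhandled. checkUnused() shows the symbol when they match,
		# i.e. when no action consumed any event of the keypress.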
#this function is called on every keypress!
def actionA(self, key, flag):
try:
print 'KEY: %s %s' % (key,getKeyDescription(key)[0])
except:
print 'KEY: %s' % key
self.unhandledKeyDialog.hide()
if self.closeSIB(key) and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if flag != 4:
if self.flags & (1<<1):
self.flags = self.uflags = 0
self.flags |= (1<<flag)
if flag == 1: # break
self.checkUnusedTimer.start(0, True)
return 0
def closeSIB(self, key):
if key >= 12 and key not in (114, 115, 352, 103, 108, 402, 403, 407, 412):
return True
else:
return False
#this function is only called when no other action has handled this key
def actionB(self, key, flag):
if flag != 4:
self.uflags |= (1<<flag)
def checkUnused(self):
if self.flags == self.uflags:
self.unhandledKeyDialog.show()
self.hideUnhandledKeySymbolTimer.start(2000, True)
class InfoBarScreenSaver:
def __init__(self):
self.onExecBegin.append(self.__onExecBegin)
self.onExecEnd.append(self.__onExecEnd)
self.screenSaverTimer = eTimer()
self.screenSaverTimer.callback.append(self.screensaverTimeout)
self.screensaver = self.session.instantiateDialog(ScreenSaver.Screensaver)
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
self.screensaver.hide()
def __onExecBegin(self):
self.ScreenSaverTimerStart()
def __onExecEnd(self):
if self.screensaver.shown:
self.screensaver.hide()
eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
self.screenSaverTimer.stop()
def ScreenSaverTimerStart(self):
time = int(config.usage.screen_saver.value)
flag = self.seekstate[0]
if not flag:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if ref:
ref = ref.toString().split(":")
flag = ref[2] == "2" or os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS
if time and flag:
self.screenSaverTimer.startLongTimer(time)
else:
self.screenSaverTimer.stop()
def screensaverTimeout(self):
if self.execing and not Screens.Standby.inStandby and not Screens.Standby.inTryQuitMainloop:
self.hide()
if hasattr(self, "pvrStateDialog"):
self.pvrStateDialog.hide()
self.screensaver.show()
eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressScreenSaver)
def keypressScreenSaver(self, key, flag):
if flag:
self.screensaver.hide()
self.show()
self.ScreenSaverTimerStart()
eActionMap.getInstance().unbindAction('', self.keypressScreenSaver)
class SecondInfoBar(Screen):
ADD_TIMER = 0
REMOVE_TIMER = 1
def __init__(self, session):
Screen.__init__(self, session)
self["epg_description"] = ScrollLabel()
self["channel"] = Label()
self["key_red"] = Label()
self["key_green"] = Label()
self["key_yellow"] = Label()
self["key_blue"] = Label()
self["SecondInfoBar"] = ActionMap(["2ndInfobarActions"],
{
"prevPage": self.pageUp,
"nextPage": self.pageDown,
"prevEvent": self.prevEvent,
"nextEvent": self.nextEvent,
"timerAdd": self.timerAdd,
"openSimilarList": self.openSimilarList,
}, -1)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.getEvent
})
self.onShow.append(self.__Show)
self.onHide.append(self.__Hide)
def pageUp(self):
self["epg_description"].pageUp()
def pageDown(self):
self["epg_description"].pageDown()
def __Show(self):
if config.vixsettings.ColouredButtons.value:
self["key_yellow"].setText(_("Search"))
self["key_red"].setText(_("Similar"))
self["key_blue"].setText(_("Extensions"))
self["SecondInfoBar"].doBind()
self.getEvent()
def __Hide(self):
if self["SecondInfoBar"].bound:
self["SecondInfoBar"].doUnbind()
def getEvent(self):
self["epg_description"].setText("")
self["channel"].setText("")
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
Event = self.epglist[0]
Ref = ServiceReference(ref)
callback = self.eventViewCallback
self.cbFunc = callback
self.currentService = Ref
self.isRecording = (not Ref.ref.flags & eServiceReference.isGroup) and Ref.ref.getPath()
self.event = Event
self.key_green_choice = self.ADD_TIMER
if self.isRecording:
self["key_green"].setText("")
else:
self["key_green"].setText(_("Add timer"))
self.setEvent(self.event)
def getNowNext(self):
epglist = [ ]
service = self.session.nav.getCurrentService()
info = service and service.info()
ptr = info and info.getEvent(0)
if ptr:
epglist.append(ptr)
ptr = info and info.getEvent(1)
if ptr:
epglist.append(ptr)
self.epglist = epglist
def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
epglist = self.epglist
if len(epglist) > 1:
tmp = epglist[0]
epglist[0] = epglist[1]
epglist[1] = tmp
setEvent(epglist[0])
def prevEvent(self):
if self.cbFunc is not None:
self.cbFunc(self.setEvent, self.setService, -1)
def nextEvent(self):
if self.cbFunc is not None:
self.cbFunc(self.setEvent, self.setService, +1)
def removeTimer(self, timer):
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
def timerAdd(self):
self.hide()
self.secondInfoBarWasShown = False
if self.isRecording:
return
event = self.event
serviceref = self.currentService
if event is None:
return
eventid = event.getEventId()
refstr = serviceref.ref.toString()
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
cb_func = lambda ret : not ret or self.removeTimer(timer)
self.session.openWithCallback(cb_func, MessageBox, _("Do you really want to delete %s?") % event.getEventName())
break
else:
newEntry = RecordTimerEntry(self.currentService, checkOldTimers = True, dirname = preferredTimerPath(), *parseEvent(self.event))
self.session.openWithCallback(self.finishedAdd, TimerEntry, newEntry)
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
if not entry.repeated and not config.recording.margin_before.value and not config.recording.margin_after.value and len(simulTimerList) > 1:
change_time = False
conflict_begin = simulTimerList[1].begin
conflict_end = simulTimerList[1].end
if conflict_begin == entry.end:
entry.end -= 30
change_time = True
elif entry.begin == conflict_end:
entry.begin += 30
change_time = True
if change_time:
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self["key_green"].setText(_("Remove timer"))
self.key_green_choice = self.REMOVE_TIMER
else:
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def setService(self, service):
self.currentService=service
if self.isRecording:
self["channel"].setText(_("Recording"))
else:
name = self.currentService.getServiceName()
if name is not None:
self["channel"].setText(name)
else:
self["channel"].setText(_("unknown service"))
def sort_func(self,x,y):
if x[1] < y[1]:
return -1
elif x[1] == y[1]:
return 0
else:
return 1
def setEvent(self, event):
if event is None:
return
self.event = event
try:
name = event.getEventName()
self["channel"].setText(name)
except:
pass
description = event.getShortDescription()
extended = event.getExtendedDescription()
if description and extended:
description += '\n'
text = description + extended
self.setTitle(event.getEventName())
self["epg_description"].setText(text)
serviceref = self.currentService
eventid = self.event.getEventId()
refstr = serviceref.ref.toString()
isRecordEvent = False
for timer in self.session.nav.RecordTimer.timer_list:
if timer.eit == eventid and timer.service_ref.ref.toString() == refstr:
isRecordEvent = True
break
if isRecordEvent and self.key_green_choice != self.REMOVE_TIMER:
self["key_green"].setText(_("Remove timer"))
self.key_green_choice = self.REMOVE_TIMER
elif not isRecordEvent and self.key_green_choice != self.ADD_TIMER:
self["key_green"].setText(_("Add timer"))
self.key_green_choice = self.ADD_TIMER
def openSimilarList(self):
id = self.event and self.event.getEventId()
refstr = str(self.currentService)
if id is not None:
self.hide()
self.secondInfoBarWasShown = False
self.session.open(EPGSelection, refstr, None, id)
class InfoBarShowHide(InfoBarScreenSaver):
""" InfoBar show/hide control, accepts toggleShow and hide actions, might start
fancy animations. """
STATE_HIDDEN = 0
STATE_HIDING = 1
STATE_SHOWING = 2
STATE_SHOWN = 3
def __init__(self):
self["ShowHideActions"] = ActionMap( ["InfobarShowHideActions"] ,
{
"LongOKPressed": self.toggleShowLong,
"toggleShow": self.toggleShow,
"hide": self.keyHide,
}, 1) # lower prio to make it possible to override ok and cancel..
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.serviceStarted,
})
InfoBarScreenSaver.__init__(self)
self.__state = self.STATE_SHOWN
self.__locked = 0
self.hideTimer = eTimer()
self.hideTimer.callback.append(self.doTimerHide)
self.hideTimer.start(5000, True)
self.onShow.append(self.__onShow)
self.onHide.append(self.__onHide)
self.onShowHideNotifiers = []
self.standardInfoBar = False
self.secondInfoBarScreen = ""
if isStandardInfoBar(self):
self.secondInfoBarScreen = self.session.instantiateDialog(SecondInfoBar)
self.secondInfoBarScreen.show()
self.onLayoutFinish.append(self.__layoutFinished)
def __layoutFinished(self):
if self.secondInfoBarScreen:
self.secondInfoBarScreen.hide()
self.standardInfoBar = True
self.secondInfoBarWasShown = False
self.EventViewIsShown = False
def __onShow(self):
self.__state = self.STATE_SHOWN
for x in self.onShowHideNotifiers:
x(True)
self.startHideTimer()
def doDimming(self):
if config.usage.show_infobar_do_dimming.value:
self.dimmed = self.dimmed-1
else:
self.dimmed = 0
self.DimmingTimer.stop()
self.doHide()
def unDimming(self):
self.unDimmingTimer.stop()
self.doWriteAlpha(config.av.osd_alpha.value)
def doWriteAlpha(self, value):
if fileExists("/proc/stb/video/alpha"):
f=open("/proc/stb/video/alpha","w")
f.write("%i" % (value))
f.close()
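
	# doDimming()/doHide() ramp this value down from config.av.osd_alpha to
	# fade the OSD out; unDimming() restores the configured alpha.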
def __onHide(self):
self.__state = self.STATE_HIDDEN
self.resetAlpha()
for x in self.onShowHideNotifiers:
x(False)
def resetAlpha(self):
if config.usage.show_infobar_do_dimming.value:
self.unDimmingTimer = eTimer()
self.unDimmingTimer.callback.append(self.unDimming)
self.unDimmingTimer.start(300, True)
def keyHide(self):
if self.__state == self.STATE_HIDDEN:
if config.vixsettings.InfoBarEpg_mode.value == "2":
self.openInfoBarEPG()
else:
self.hide()
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if self.session.pipshown and "popup" in config.usage.pip_hideOnExit.value:
if config.usage.pip_hideOnExit.value == "popup":
self.session.openWithCallback(self.hidePipOnExitCallback, MessageBox, _("Disable Picture in Picture"), simple=True)
else:
self.hidePipOnExitCallback(True)
else:
self.hide()
if hasattr(self, "pvrStateDialog"):
self.pvrStateDialog.hide()
def hidePipOnExitCallback(self, answer):
if answer:
self.showPiP()
def connectShowHideNotifier(self, fnc):
if not fnc in self.onShowHideNotifiers:
self.onShowHideNotifiers.append(fnc)
def disconnectShowHideNotifier(self, fnc):
if fnc in self.onShowHideNotifiers:
self.onShowHideNotifiers.remove(fnc)
def serviceStarted(self):
if self.execing:
if config.usage.show_infobar_on_zap.value:
self.doShow()
def startHideTimer(self):
if self.__state == self.STATE_SHOWN and not self.__locked:
self.hideTimer.stop()
idx = config.usage.infobar_timeout.index
if idx:
self.hideTimer.start(idx*1000, True)
elif (self.secondInfoBarScreen and self.secondInfoBarScreen.shown) or ((not config.usage.show_second_infobar.value or isMoviePlayerInfoBar(self)) and self.EventViewIsShown):
self.hideTimer.stop()
idx = config.usage.show_second_infobar.index - 1
if idx:
self.hideTimer.start(idx*1000, True)
elif hasattr(self, "pvrStateDialog"):
self.hideTimer.stop()
idx = config.usage.infobar_timeout.index
if idx:
self.hideTimer.start(idx*1000, True)
def doShow(self):
self.show()
self.startHideTimer()
def doTimerHide(self):
self.hideTimer.stop()
self.DimmingTimer = eTimer()
self.DimmingTimer.callback.append(self.doDimming)
self.DimmingTimer.start(70, True)
self.dimmed = config.usage.show_infobar_dimming_speed.value
def doHide(self):
if self.__state != self.STATE_HIDDEN:
if self.dimmed > 0:
self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
self.DimmingTimer.start(5, True)
else:
self.DimmingTimer.stop()
self.hide()
elif self.__state == self.STATE_HIDDEN and self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
if self.dimmed > 0:
self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
self.DimmingTimer.start(5, True)
else:
self.DimmingTimer.stop()
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
self.resetAlpha()
elif self.__state == self.STATE_HIDDEN and self.EventViewIsShown:
try:
self.eventView.close()
except:
pass
self.EventViewIsShown = False
# elif hasattr(self, "pvrStateDialog"):
# if self.dimmed > 0:
# self.doWriteAlpha((config.av.osd_alpha.value*self.dimmed/config.usage.show_infobar_dimming_speed.value))
# self.DimmingTimer.start(5, True)
# else:
# self.DimmingTimer.stop()
# try:
# self.pvrStateDialog.hide()
# except:
# pass
def toggleShow(self):
if not hasattr(self, "LongButtonPressed"):
self.LongButtonPressed = False
if not self.LongButtonPressed:
if self.__state == self.STATE_HIDDEN:
if not self.secondInfoBarWasShown:
self.show()
if self.secondInfoBarScreen:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
self.EventViewIsShown = False
elif isStandardInfoBar(self) and config.usage.show_second_infobar.value == "EPG":
self.showDefaultEPG()
elif isStandardInfoBar(self) and config.usage.show_second_infobar.value == "INFOBAREPG":
self.openInfoBarEPG()
elif self.secondInfoBarScreen and config.usage.show_second_infobar.value != "none" and not self.secondInfoBarScreen.shown:
self.hide()
self.secondInfoBarScreen.show()
self.secondInfoBarWasShown = True
self.startHideTimer()
elif isMoviePlayerInfoBar(self) and not self.EventViewIsShown and config.usage.show_second_infobar.value:
self.hide()
try:
self.openEventView(True)
except:
pass
self.EventViewIsShown = True
self.startHideTimer()
else:
self.hide()
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
elif self.EventViewIsShown:
try:
self.eventView.close()
except:
pass
self.EventViewIsShown = False
def toggleShowLong(self):
if self.LongButtonPressed:
if isinstance(self, InfoBarEPG):
if config.vixsettings.InfoBarEpg_mode.value == "1":
self.openInfoBarEPG()
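	# lockShow/unlockShow keep a nesting counter so several callers can pin
	# the infobar visible at once; the hide timer is only restarted once
	# every lock has been released.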
def lockShow(self):
self.__locked += 1
if self.execing:
self.show()
self.hideTimer.stop()
def unlockShow(self):
self.__locked -= 1
		if self.__locked < 0:
self.__locked = 0
if self.execing:
self.startHideTimer()
class NumberZap(Screen):
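	"""Collects typed channel digits and zaps: keyOK fires automatically
	after the fourth digit or when the 5 second input timer expires."""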
def quit(self):
self.Timer.stop()
self.close()
def keyOK(self):
self.Timer.stop()
self.close(self.service, self.bouquet)
def handleServiceName(self):
if self.searchNumber:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
self["servicename"].setText(ServiceReference(self.service).getServiceName())
if not self.startBouquet:
self.startBouquet = self.bouquet
def keyBlue(self):
self.Timer.start(5000, True)
if self.searchNumber:
if self.startBouquet == self.bouquet:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()), firstBouquetOnly = True)
else:
self.service, self.bouquet = self.searchNumber(int(self["number"].getText()))
self["servicename"].setText(ServiceReference(self.service).getServiceName())
def keyNumberGlobal(self, number):
self.Timer.start(5000, True)
self.numberString += str(number)
self["number"].setText(self.numberString)
self["number_summary"].setText(self.numberString)
self.handleServiceName()
if len(self.numberString) >= 4:
self.keyOK()
def __init__(self, session, number, searchNumberFunction = None):
Screen.__init__(self, session)
self.onChangedEntry = [ ]
self.numberString = str(number)
self.searchNumber = searchNumberFunction
self.startBouquet = None
self["channel"] = Label(_("Channel:"))
self["channel_summary"] = StaticText(_("Channel:"))
self["number"] = Label(self.numberString)
self["number_summary"] = StaticText(self.numberString)
self["servicename"] = Label()
self.handleServiceName()
self["actions"] = NumberActionMap( [ "SetupActions", "ShortcutActions" ],
{
"cancel": self.quit,
"ok": self.keyOK,
"blue": self.keyBlue,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
})
self.Timer = eTimer()
self.Timer.callback.append(self.keyOK)
self.Timer.start(5000, True)
class InfoBarNumberZap:
""" Handles an initial number for NumberZapping """
def __init__(self):
self["NumberActions"] = NumberActionMap( [ "NumberActions"],
{
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
})
def keyNumberGlobal(self, number):
if self.pvrStateDialog.has_key("PTSSeekPointer") and self.timeshiftEnabled() and self.isSeekable():
# noinspection PyProtectedMember
InfoBarTimeshiftState._mayShow(self)
self.pvrStateDialog["PTSSeekPointer"].setPosition((self.pvrStateDialog["PTSSeekBack"].instance.size().width()-4)/2, self.pvrStateDialog["PTSSeekPointer"].position[1])
if self.seekstate != self.SEEK_STATE_PLAY:
self.setSeekState(self.SEEK_STATE_PLAY)
self.ptsSeekPointerOK()
return
if self.pts_blockZap_timer.isActive():
return
# if self.save_current_timeshift and self.timeshiftEnabled():
# InfoBarTimeshift.saveTimeshiftActions(self)
# return
if number == 0:
if isinstance(self, InfoBarPiP) and self.pipHandles0Action():
self.pipDoHandle0Action()
elif len(self.servicelist.history) > 1:
self.checkTimeshiftRunning(self.recallPrevService)
else:
if self.has_key("TimeshiftActions") and self.timeshiftEnabled():
ts = self.getTimeshift()
if ts and ts.isTimeshiftActive():
return
self.session.openWithCallback(self.numberEntered, NumberZap, number, self.searchNumber)
def recallPrevService(self, reply):
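		# In panic button mode this clears both zap histories (main and PiP
		# service lists), closes any PiP and jumps back to channel number 1
		# of the first bouquet; otherwise it simply recalls the previously
		# played service.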
if reply:
if config.usage.panicbutton.value:
if self.session.pipshown:
del self.session.pip
self.session.pipshown = False
self.servicelist.history_tv = []
self.servicelist.history_radio = []
self.servicelist.history = self.servicelist.history_tv
self.servicelist.history_pos = 0
self.servicelist2.history_tv = []
self.servicelist2.history_radio = []
self.servicelist2.history = self.servicelist.history_tv
self.servicelist2.history_pos = 0
if config.usage.multibouquet.value:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
else:
bqrootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet'% self.service_types
serviceHandler = eServiceCenter.getInstance()
rootbouquet = eServiceReference(bqrootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
				if bouquetlist is not None:
while True:
bouquet = bouquetlist.getNext()
if bouquet.flags & eServiceReference.isDirectory:
self.servicelist.clearPath()
self.servicelist.setRoot(bouquet)
servicelist = serviceHandler.list(bouquet)
							if servicelist is not None:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
service, bouquet2 = self.searchNumber(1)
if service == serviceIterator: break
serviceIterator = servicelist.getNext()
if serviceIterator.valid() and service == serviceIterator: break
self.servicelist.enterPath(rootbouquet)
self.servicelist.enterPath(bouquet)
self.servicelist.saveRoot()
self.servicelist2.enterPath(rootbouquet)
self.servicelist2.enterPath(bouquet)
self.servicelist2.saveRoot()
self.selectAndStartService(service, bouquet)
else:
self.servicelist.recallPrevService()
def numberEntered(self, service = None, bouquet = None):
if service:
self.selectAndStartService(service, bouquet)
def searchNumberHelper(self, serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if servicelist:
serviceIterator = servicelist.getNext()
while serviceIterator.valid():
if num == serviceIterator.getChannelNum():
return serviceIterator
serviceIterator = servicelist.getNext()
return None
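	# searchNumber looks up a channel number: first in the current bouquet
	# (unless firstBouquetOnly), then across all bouquets when multibouquet
	# is enabled; alternative_number_mode or firstBouquetOnly stops the scan
	# after the first bouquet.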
def searchNumber(self, number, firstBouquetOnly = False):
bouquet = self.servicelist.getRoot()
service = None
serviceHandler = eServiceCenter.getInstance()
if not firstBouquetOnly:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if config.usage.multibouquet.value and not service:
bouquet = self.servicelist.bouquet_root
bouquetlist = serviceHandler.list(bouquet)
if bouquetlist:
bouquet = bouquetlist.getNext()
while bouquet.valid():
if bouquet.flags & eServiceReference.isDirectory:
service = self.searchNumberHelper(serviceHandler, number, bouquet)
if service:
playable = not (service.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)) or (service.flags & eServiceReference.isNumberedMarker)
if not playable:
service = None
break
if config.usage.alternative_number_mode.value or firstBouquetOnly:
break
bouquet = bouquetlist.getNext()
return service, bouquet
def selectAndStartService(self, service, bouquet):
if service:
if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
self.servicelist.clearPath()
if self.servicelist.bouquet_root != bouquet:
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service) #select the service in servicelist
self.servicelist.zap(enable_pipzap = True)
self.servicelist.correctChannelNumber()
self.servicelist.startRoot = None
def zapToNumber(self, number):
service, bouquet = self.searchNumber(number)
self.selectAndStartService(service, bouquet)
config.misc.initialchannelselection = ConfigBoolean(default = True)
class InfoBarChannelSelection:
""" ChannelSelection - handles the channelSelection dialog and the initial
channelChange actions which open the channelSelection dialog """
def __init__(self):
#instantiate forever
self.servicelist = self.session.instantiateDialog(ChannelSelection)
self.servicelist2 = self.session.instantiateDialog(PiPZapSelection)
self.tscallback = None
self["ChannelSelectActions"] = HelpableActionMap(self, "InfobarChannelSelection",
{
"switchChannelUp": (self.switchChannelUp, _("Open service list and select previous channel")),
"switchChannelDown": (self.switchChannelDown, _("Open service list and select next channel")),
"switchChannelUpLong": (self.switchChannelUp, _("Open service list and select previous channel for PiP")),
"switchChannelDownLong": (self.switchChannelDown, _("Open service list and select next channel for PiP")),
"zapUp": (self.zapUp, _("Switch to previous channel")),
"zapDown": (self.zapDown, _("Switch next channel")),
"historyBack": (self.historyBack, _("Switch to previous channel in history")),
"historyNext": (self.historyNext, _("Switch to next channel in history")),
"openServiceList": (self.openServiceList, _("Open service list")),
"openSatellites": (self.openSatellites, _("Open satellites list")),
"openBouquets": (self.openBouquets, _("Open favourites list")),
"LeftPressed": self.LeftPressed,
"RightPressed": self.RightPressed,
"ChannelPlusPressed": self.ChannelPlusPressed,
"ChannelMinusPressed": self.ChannelMinusPressed,
"ChannelPlusPressedLong": self.ChannelPlusPressed,
"ChannelMinusPressedLong": self.ChannelMinusPressed,
})
def LeftPressed(self):
if config.vixsettings.InfoBarEpg_mode.value == "3" and config.usage.show_second_infobar.value != "INFOBAREPG":
self.openInfoBarEPG()
else:
self.zapUp()
def RightPressed(self):
if config.vixsettings.InfoBarEpg_mode.value == "3" and config.usage.show_second_infobar.value != "INFOBAREPG":
self.openInfoBarEPG()
else:
self.zapDown()
def ChannelPlusPressed(self):
if config.usage.channelbutton_mode.value == "0" or config.usage.show_second_infobar.value == "INFOBAREPG":
self.zapDown()
elif config.usage.channelbutton_mode.value == "1":
self.openServiceList()
elif config.usage.channelbutton_mode.value == "2":
self.serviceListType = "Norm"
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def ChannelMinusPressed(self):
if config.usage.channelbutton_mode.value == "0" or config.usage.show_second_infobar.value == "INFOBAREPG":
self.zapUp()
elif config.usage.channelbutton_mode.value == "1":
self.openServiceList()
elif config.usage.channelbutton_mode.value == "2":
self.serviceListType = "Norm"
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def showTvChannelList(self, zap=False):
self.servicelist.setModeTv()
if zap:
self.servicelist.zap()
if config.usage.show_servicelist.value:
self.session.execDialog(self.servicelist)
def showRadioChannelList(self, zap=False):
self.servicelist.setModeRadio()
if zap:
self.servicelist.zap()
if config.usage.show_servicelist.value:
self.session.execDialog(self.servicelist)
def historyBack(self):
if config.usage.historymode.value == "0":
self.servicelist.historyBack()
else:
self.servicelist.historyZap(-1)
def historyNext(self):
if config.usage.historymode.value == "0":
self.servicelist.historyNext()
else:
self.servicelist.historyZap(+1)
def switchChannelUp(self):
		if not (self.secondInfoBarScreen and self.secondInfoBarScreen.shown):
self.keyHide()
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist.moveUp()
self.session.execDialog(self.servicelist)
else:
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
elif self.LongButtonPressed:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist2.moveUp()
self.session.execDialog(self.servicelist2)
else:
self.servicelist2.showFavourites()
self.session.execDialog(self.servicelist2)
def switchChannelDown(self):
		if not (self.secondInfoBarScreen and self.secondInfoBarScreen.shown):
self.keyHide()
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist.moveDown()
self.session.execDialog(self.servicelist)
else:
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
elif self.LongButtonPressed:
if not config.usage.show_bouquetalways.value:
if "keep" not in config.usage.servicelist_cursor_behavior.value:
self.servicelist2.moveDown()
self.session.execDialog(self.servicelist2)
else:
self.servicelist2.showFavourites()
self.session.execDialog(self.servicelist2)
def openServiceList(self):
self.session.execDialog(self.servicelist)
def openServiceListPiP(self):
self.session.execDialog(self.servicelist2)
def openSatellites(self):
self.servicelist.showSatellites()
self.session.execDialog(self.servicelist)
def openBouquets(self):
self.servicelist.showFavourites()
self.session.execDialog(self.servicelist)
def zapUp(self):
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if self.pts_blockZap_timer.isActive():
return
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value:
if self.servicelist.atBegin():
self.servicelist.prevBouquet()
self.servicelist.moveUp()
cur = self.servicelist.getCurrentSelection()
if cur:
if self.servicelist.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist.moveUp()
self.servicelist.zap(enable_pipzap = True)
elif self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.open(MessageBox, _("Please open Picture in Picture first"), MessageBox.TYPE_ERROR)
return
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
ChannelSelectionInstance.dopipzap = True
if self.servicelist2.inBouquet():
prev = self.servicelist2.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value:
if self.servicelist2.atBegin():
self.servicelist2.prevBouquet()
self.servicelist2.moveUp()
cur = self.servicelist2.getCurrentSelection()
if cur:
if ChannelSelectionInstance.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist2.moveUp()
self.servicelist2.zap(enable_pipzap = True)
ChannelSelectionInstance.dopipzap = False
def openFavouritesList(self):
self.servicelist.showFavourites()
self.openServiceList()
def zapDown(self):
if not self.LongButtonPressed or SystemInfo.get("NumVideoDecoders", 1) <= 1:
if self.pts_blockZap_timer.isActive():
return
if self.servicelist.inBouquet():
prev = self.servicelist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist.atEnd():
self.servicelist.nextBouquet()
else:
self.servicelist.moveDown()
cur = self.servicelist.getCurrentSelection()
if cur:
if self.servicelist.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist.moveDown()
self.servicelist.zap(enable_pipzap = True)
elif self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.open(MessageBox, _("Please open Picture in Picture first"), MessageBox.TYPE_ERROR)
return
from Screens.ChannelSelection import ChannelSelection
ChannelSelectionInstance = ChannelSelection.instance
ChannelSelectionInstance.dopipzap = True
if self.servicelist2.inBouquet():
prev = self.servicelist2.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and self.servicelist2.atEnd():
self.servicelist2.nextBouquet()
else:
self.servicelist2.moveDown()
cur = self.servicelist2.getCurrentSelection()
if cur:
if ChannelSelectionInstance.dopipzap:
isPlayable = self.session.pip.isPlayableForPipService(cur)
else:
isPlayable = isPlayableForCur(cur)
if cur and (cur.toString() == prev or isPlayable):
break
else:
self.servicelist2.moveDown()
self.servicelist2.zap(enable_pipzap = True)
ChannelSelectionInstance.dopipzap = False
class InfoBarMenu:
""" Handles a menu action, to open the (main) menu """
def __init__(self):
self["MenuActions"] = HelpableActionMap(self, "InfobarMenuActions",
{
"mainMenu": (self.mainMenu, _("Enter main menu...")),
"showNetworkSetup": (self.showNetworkMounts, _("Show network mounts ...")),
"showSystemSetup": (self.showSystemMenu, _("Show network mounts ...")),
"showRFmod": (self.showRFSetup, _("Show RFmod setup...")),
"toggleAspectRatio": (self.toggleAspectRatio, _("Toggle aspect ratio...")),
})
self.session.infobar = None
def mainMenu(self):
# print "loading mainmenu XML..."
menu = mdom.getroot()
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.infobar = self
# so we can access the currently active infobar from screens opened from within the mainmenu
# at the moment used from the SubserviceSelection
self.session.openWithCallback(self.mainMenuClosed, MainMenu, menu)
def mainMenuClosed(self, *val):
self.session.infobar = None
def toggleAspectRatio(self):
ASPECT = [ "auto", "16_9", "4_3" ]
ASPECT_MSG = { "auto":"Auto", "16_9":"16:9", "4_3":"4:3" }
if config.av.aspect.value in ASPECT:
index = ASPECT.index(config.av.aspect.value)
config.av.aspect.value = ASPECT[(index+1)%3]
else:
config.av.aspect.value = "auto"
config.av.aspect.save()
self.session.open(MessageBox, _("AV aspect is %s." % ASPECT_MSG[config.av.aspect.value]), MessageBox.TYPE_INFO, timeout=5)
def showSystemMenu(self):
menulist = mdom.getroot().findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'setup_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'system_selection':
menu = item
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.openWithCallback(self.mainMenuClosed, Menu, menu)
def showNetworkMounts(self):
menulist = mdom.getroot().findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'setup_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'system_selection':
menulist = item.findall('menu')
for item in menulist:
if item.attrib['entryID'] == 'network_menu':
menu = item
assert menu.tag == "menu", "root element in menu must be 'menu'!"
self.session.openWithCallback(self.mainMenuClosed, Menu, menu)
def showRFSetup(self):
self.session.openWithCallback(self.mainMenuClosed, Setup, 'RFmod')
class InfoBarSimpleEventView:
def __init__(self):
pass
class SimpleServicelist:
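	"""Minimal wrap-around cursor over a flat list of services; used by the
	single service EPG to step through the current bouquet."""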
def __init__(self, services):
self.services = services
self.length = len(services)
self.current = 0
def selectService(self, service):
if not self.length:
self.current = -1
return False
else:
self.current = 0
while self.services[self.current].ref != service:
self.current += 1
if self.current >= self.length:
return False
return True
def nextService(self):
if not self.length:
return
if self.current+1 < self.length:
self.current += 1
else:
self.current = 0
def prevService(self):
if not self.length:
return
if self.current-1 > -1:
self.current -= 1
else:
self.current = self.length - 1
def currentService(self):
if not self.length or self.current >= self.length:
return None
return self.services[self.current]
class InfoBarEPG:
""" EPG - Opens an EPG list when the showEPGList action fires """
def __init__(self):
self.is_now_next = False
self.dlg_stack = []
self.bouquetSel = None
self.eventView = None
self.epglist = []
self.defaultEPGType = self.getDefaultEPGtype()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.__evEventInfoChanged,
})
self["EPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
{
"RedPressed": (self.RedPressed, _("Show epg")),
"IPressed": (self.IPressed, _("show program information...")),
"InfoPressed": (self.InfoPressed, _("show program information...")),
"showEventInfoPlugin": (self.showEventInfoPlugins, _("List EPG functions...")),
"EPGPressed": (self.showDefaultEPG, _("show EPG...")),
"showEventGuidePlugin": (self.showEventGuidePlugins, _("List EPG functions...")),
"showInfobarOrEpgWhenInfobarAlreadyVisible": self.showEventInfoWhenNotVisible,
})
def getEPGPluginList(self):
pluginlist = [(p.name, boundFunction(self.runPlugin, p)) for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EVENTINFO)]
if pluginlist:
pluginlist.append((_("Event Info"), self.openEventView))
pluginlist.append((_("Graphical EPG"), self.openGraphEPG))
pluginlist.append((_("Infobar EPG"), self.openInfoBarEPG))
pluginlist.append((_("Multi EPG"), self.openMultiServiceEPG))
pluginlist.append((_("Show EPG for current channel..."), self.openSingleServiceEPG))
return pluginlist
def getDefaultEPGtype(self):
pluginlist = self.getEPGPluginList()
		config.usage.defaultEPGType = ConfigSelection(default = "None", choices = pluginlist)
for plugin in pluginlist:
if plugin[0] == config.usage.defaultEPGType.value:
return plugin[1]
return None
def showEventInfoPlugins(self):
if isStandardInfoBar(self):
			if getBrandOEM() not in ('xtrend', 'odin', 'ini', 'dags', 'gigablue', 'xp'):
pluginlist = self.getEPGPluginList()
if pluginlist:
# pluginlist.append((_("Select default EPG type..."), self.SelectDefaultInfoPlugin))
self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
else:
self.openSingleServiceEPG()
else:
self.openEventView()
elif isMoviePlayerInfoBar(self):
self.openEventView()
def showEventGuidePlugins(self):
if isMoviePlayerInfoBar(self):
self.openEventView()
else:
pluginlist = self.getEPGPluginList()
if pluginlist:
# pluginlist.append((_("Select default EPG type..."), self.SelectDefaultInfoPlugin))
self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_("Please choose an extension..."), list = pluginlist, skin_name = "EPGExtensionsList")
else:
self.openSingleServiceEPG()
def SelectDefaultInfoPlugin(self):
self.session.openWithCallback(self.DefaultInfoPluginChosen, ChoiceBox, title=_("Please select a default EPG type..."), list = self.getEPGPluginList(), skin_name = "EPGExtensionsList")
def DefaultInfoPluginChosen(self, answer):
if answer is not None:
self.defaultEPGType = answer[1]
config.usage.defaultEPGType.value = answer[0]
config.usage.defaultEPGType.save()
configfile.save()
def runPlugin(self, plugin):
plugin(session = self.session, servicelist=self.servicelist)
def EventInfoPluginChosen(self, answer):
if answer is not None:
answer[1]()
def RedPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
if config.usage.defaultEPGType.value != _("Graphical EPG") and config.usage.defaultEPGType.value != _("None"):
self.openGraphEPG()
else:
self.openSingleServiceEPG()
def InfoPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
			if getBrandOEM() in ('xtrend', 'odin', 'ini', 'dags', 'gigablue', 'xp'):
self.openEventView()
else:
self.showDefaultEPG()
def IPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
self.openEventView()
def EPGPressed(self):
if isStandardInfoBar(self) or isMoviePlayerInfoBar(self):
self.openGraphEPG()
def showEventInfoWhenNotVisible(self):
if self.shown:
self.openEventView()
else:
self.toggleShow()
return 1
def zapToService(self, service, bouquet = None, preview = False, zapback = False):
if self.servicelist.startServiceRef is None:
self.servicelist.startServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.servicelist.currentServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if service is not None:
if self.servicelist.getRoot() != bouquet: #already in correct bouquet?
self.servicelist.clearPath()
if self.servicelist.bouquet_root != bouquet:
self.servicelist.enterPath(self.servicelist.bouquet_root)
self.servicelist.enterPath(bouquet)
self.servicelist.setCurrentSelection(service) #select the service in servicelist
if not zapback or preview:
self.servicelist.zap(preview_zap = preview)
if (self.servicelist.dopipzap or zapback) and not preview:
self.servicelist.zapBack()
if not preview:
self.servicelist.startServiceRef = None
self.servicelist.startRoot = None
def getBouquetServices(self, bouquet):
services = []
servicelist = eServiceCenter.getInstance().list(bouquet)
		if servicelist is not None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker): #ignore non playable services
continue
services.append(ServiceReference(service))
return services
def openBouquetEPG(self, bouquet = None, bouquets = None):
if bouquet:
self.StartBouquet = bouquet
self.dlg_stack.append(self.session.openWithCallback(self.closed, EPGSelection, zapFunc=self.zapToService, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef, bouquets = bouquets))
def closed(self, ret=False):
if not self.dlg_stack:
return
closedScreen = self.dlg_stack.pop()
if self.bouquetSel and closedScreen == self.bouquetSel:
self.bouquetSel = None
elif self.eventView and closedScreen == self.eventView:
self.eventView = None
if ret == True or ret == 'close':
dlgs=len(self.dlg_stack)
if dlgs > 0:
self.dlg_stack[dlgs-1].close(dlgs > 1)
self.reopen(ret)
def MultiServiceEPG(self):
bouquets = self.servicelist.getBouquetList()
if bouquets is None:
cnt = 0
else:
cnt = len(bouquets)
if (self.EPGtype == "multi" and config.epgselection.multi_showbouquet.value) or (self.EPGtype == "graph" and config.epgselection.graph_showbouquet.value):
if cnt > 1: # show bouquet list
self.bouquetSel = self.session.openWithCallback(self.closed, EpgBouquetSelector, bouquets, self.openBouquetEPG, enableWrapAround=True)
self.dlg_stack.append(self.bouquetSel)
elif cnt == 1:
self.openBouquetEPG(bouquets=bouquets)
else:
self.openBouquetEPG(bouquets=bouquets)
def openMultiServiceEPG(self):
if self.servicelist is None:
return
self.EPGtype = "multi"
self.StartBouquet = self.servicelist.getRoot()
if isMoviePlayerInfoBar(self):
self.StartRef = self.lastservice
else:
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.MultiServiceEPG()
def openGraphEPG(self, reopen=False):
if self.servicelist is None:
return
self.EPGtype = "graph"
if not reopen:
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.MultiServiceEPG()
def openSingleServiceEPG(self, reopen=False):
if self.servicelist is None:
return
self.EPGtype = "enhanced"
self.SingleServiceEPG()
def openInfoBarEPG(self, reopen=False):
if self.servicelist is None:
return
if not reopen:
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if config.epgselection.infobar_type_mode.value == 'single':
self.EPGtype = "infobar"
self.SingleServiceEPG()
else:
self.EPGtype = "infobargraph"
self.MultiServiceEPG()
def showCoolTVGuide(self):
if self.servicelist is None:
return
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/CoolTVGuide/plugin.pyo"):
for plugin in plugins.getPlugins([PluginDescriptor.WHERE_EXTENSIONSMENU, PluginDescriptor.WHERE_EVENTINFO]):
if plugin.name == _("Cool TV Guide"):
self.runPlugin(plugin)
break
else:
self.session.open(MessageBox, _("The Cool TV Guide plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def SingleServiceEPG(self):
self.StartBouquet = self.servicelist.getRoot()
self.StartRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if isMoviePlayerInfoBar(self):
ref = self.lastservice
else:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if ref:
services = self.getBouquetServices(self.StartBouquet)
self.serviceSel = SimpleServicelist(services)
if self.serviceSel.selectService(ref):
self.session.openWithCallback(self.SingleServiceEPGClosed,EPGSelection, self.servicelist, zapFunc=self.zapToService, serviceChangeCB = self.changeServiceCB, EPGtype=self.EPGtype, StartBouquet=self.StartBouquet, StartRef=self.StartRef)
else:
self.session.openWithCallback(self.SingleServiceEPGClosed, EPGSelection, ref)
def changeServiceCB(self, direction, epg):
if self.serviceSel:
if direction > 0:
self.serviceSel.nextService()
else:
self.serviceSel.prevService()
epg.setService(self.serviceSel.currentService())
def SingleServiceEPGClosed(self, ret=False):
self.serviceSel = None
self.reopen(ret)
def reopen(self, answer):
if answer == 'reopengraph':
self.openGraphEPG(True)
elif answer == 'reopeninfobargraph' or answer == 'reopeninfobar':
self.openInfoBarEPG(True)
elif answer == 'close' and isMoviePlayerInfoBar(self):
self.lastservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.close()
def openSimilarList(self, eventid, refstr):
self.session.open(EPGSelection, refstr, eventid=eventid)
def getNowNext(self):
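		# Collect the "now" (index 0) and "next" (index 1) events of the
		# currently playing service into self.epglist.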
epglist = [ ]
service = self.session.nav.getCurrentService()
info = service and service.info()
ptr = info and info.getEvent(0)
if ptr:
epglist.append(ptr)
ptr = info and info.getEvent(1)
if ptr:
epglist.append(ptr)
self.epglist = epglist
def __evEventInfoChanged(self):
if self.is_now_next and len(self.dlg_stack) == 1:
self.getNowNext()
if self.eventView and self.epglist:
self.eventView.setEvent(self.epglist[0])
def showDefaultEPG(self):
if self.defaultEPGType is not None:
self.defaultEPGType()
return
self.EPGPressed()
def openEventView(self, simple=False):
if self.servicelist is None:
return
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.getNowNext()
epglist = self.epglist
if not epglist:
self.is_now_next = False
epg = eEPGCache.getInstance()
ptr = ref and ref.valid() and epg.lookupEventTime(ref, -1)
if ptr:
epglist.append(ptr)
ptr = epg.lookupEventTime(ref, ptr.getBeginTime(), +1)
if ptr:
epglist.append(ptr)
else:
self.is_now_next = True
if epglist:
if not simple:
self.eventView = self.session.openWithCallback(self.closed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
else:
self.eventView = self.session.openWithCallback(self.closed, EventViewSimple, epglist[0], ServiceReference(ref))
self.dlg_stack.append(self.eventView)
def eventViewCallback(self, setEvent, setService, val): #used for now/next displaying
epglist = self.epglist
if len(epglist) > 1:
			epglist[0], epglist[1] = epglist[1], epglist[0]
setEvent(epglist[0])
class InfoBarRdsDecoder:
"""provides RDS and Rass support/display"""
def __init__(self):
self.rds_display = self.session.instantiateDialog(RdsInfoDisplay)
self.session.instantiateSummaryDialog(self.rds_display)
self.rass_interactive = None
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.__serviceStopped,
iPlayableService.evUpdatedRassSlidePic: self.RassSlidePicChanged
})
self["RdsActions"] = ActionMap(["InfobarRdsActions"],
{
"startRassInteractive": self.startRassInteractive
},-1)
self["RdsActions"].setEnabled(False)
self.onLayoutFinish.append(self.rds_display.show)
self.rds_display.onRassInteractivePossibilityChanged.append(self.RassInteractivePossibilityChanged)
def RassInteractivePossibilityChanged(self, state):
self["RdsActions"].setEnabled(state)
def RassSlidePicChanged(self):
if not self.rass_interactive:
service = self.session.nav.getCurrentService()
decoder = service and service.rdsDecoder()
if decoder:
decoder.showRassSlidePicture()
def __serviceStopped(self):
if self.rass_interactive is not None:
rass_interactive = self.rass_interactive
self.rass_interactive = None
rass_interactive.close()
def startRassInteractive(self):
self.rds_display.hide()
self.rass_interactive = self.session.openWithCallback(self.RassInteractiveClosed, RassInteractive)
def RassInteractiveClosed(self, *val):
if self.rass_interactive is not None:
self.rass_interactive = None
self.RassSlidePicChanged()
self.rds_display.show()
class Seekbar(Screen):
def __init__(self, session, fwd):
Screen.__init__(self, session)
self.setTitle(_("Seek"))
self.session = session
self.fwd = fwd
self.percent = 0.0
self.length = None
service = session.nav.getCurrentService()
if service:
self.seek = service.seek()
if self.seek:
self.length = self.seek.getLength()
position = self.seek.getPlayPosition()
if self.length and position and int(self.length[1]) > 0:
if int(position[1]) > 0:
self.percent = float(position[1]) * 100.0 / float(self.length[1])
else:
self.close()
self["cursor"] = MovingPixmap()
self["time"] = Label()
self["actions"] = ActionMap(["WizardActions", "DirectionActions"], {"back": self.exit, "ok": self.keyOK, "left": self.keyLeft, "right": self.keyRight}, -1)
self.cursorTimer = eTimer()
self.cursorTimer.callback.append(self.updateCursor)
self.cursorTimer.start(200, False)
def updateCursor(self):
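		# Map the percentage onto the skin's seek bar (track assumed to start
		# at x=145 and span 270 px) and show the target position; PTS values
		# are 90 kHz ticks, so pts/90000 yields seconds.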
if self.length:
x = 145 + int(2.7 * self.percent)
self["cursor"].moveTo(x, 15, 1)
self["cursor"].startMoving()
pts = int(float(self.length[1]) / 100.0 * self.percent)
self["time"].setText("%d:%02d" % ((pts/60/90000), ((pts/90000)%60)))
def exit(self):
self.cursorTimer.stop()
self.close()
def keyOK(self):
if self.length:
self.seek.seekTo(int(float(self.length[1]) / 100.0 * self.percent))
self.exit()
def keyLeft(self):
self.percent -= float(config.seek.sensibility.value) / 10.0
if self.percent < 0.0:
self.percent = 0.0
def keyRight(self):
self.percent += float(config.seek.sensibility.value) / 10.0
if self.percent > 100.0:
self.percent = 100.0
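	# Note: number keys are not bound in the action map above; this handler
	# assumes a ConfigListScreen-style subclass that provides self["config"]
	# and self.positionEntry.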
def keyNumberGlobal(self, number):
sel = self["config"].getCurrent()[1]
if sel == self.positionEntry:
self.percent = float(number) * 10.0
else:
ConfigListScreen.keyNumberGlobal(self, number)
class InfoBarSeek:
"""handles actions like seeking, pause"""
SEEK_STATE_PLAY = (0, 0, 0, ">")
SEEK_STATE_PAUSE = (1, 0, 0, "||")
SEEK_STATE_EOF = (1, 0, 0, "END")
def __init__(self, actionmap = "InfobarSeekActions"):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evSeekableStatusChanged: self.__seekableStatusChanged,
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evEOF: self.__evEOF,
iPlayableService.evSOF: self.__evSOF,
})
self.fast_winding_hint_message_showed = False
class InfoBarSeekActionMap(HelpableActionMap):
def __init__(self, screen, *args, **kwargs):
HelpableActionMap.__init__(self, screen, *args, **kwargs)
self.screen = screen
def action(self, contexts, action):
# print "action:", action
if action[:5] == "seek:":
time = int(action[5:])
self.screen.doSeekRelative(time * 90000)
return 1
elif action[:8] == "seekdef:":
key = int(action[8:])
time = (-config.seek.selfdefined_13.value, False, config.seek.selfdefined_13.value,
-config.seek.selfdefined_46.value, False, config.seek.selfdefined_46.value,
-config.seek.selfdefined_79.value, False, config.seek.selfdefined_79.value)[key-1]
self.screen.doSeekRelative(time * 90000)
return 1
else:
return HelpableActionMap.action(self, contexts, action)
self["SeekActions"] = InfoBarSeekActionMap(self, actionmap,
{
"playpauseService": self.playpauseService,
"pauseService": (self.pauseService, _("Pause playback")),
"unPauseService": (self.unPauseService, _("Continue playback")),
"seekFwd": (self.seekFwd, _("Seek forward")),
"seekFwdManual": (self.seekFwdManual, _("Seek forward (enter time)")),
"seekBack": (self.seekBack, _("Seek backward")),
"seekBackManual": (self.seekBackManual, _("Seek backward (enter time)")),
"SeekbarFwd": self.seekFwdSeekbar,
"SeekbarBack": self.seekBackSeekbar
}, prio=-1) # give them a little more priority to win over color buttons
self["SeekActions"].setEnabled(False)
self["SeekActionsPTS"] = InfoBarSeekActionMap(self, "InfobarSeekActionsPTS",
{
"playpauseService": self.playpauseService,
"pauseService": (self.pauseService, _("Pause playback")),
"unPauseService": (self.unPauseService, _("Continue playback")),
"seekFwd": (self.seekFwd, _("skip forward")),
"seekFwdManual": (self.seekFwdManual, _("skip forward (enter time)")),
"seekBack": (self.seekBack, _("skip backward")),
"seekBackManual": (self.seekBackManual, _("skip backward (enter time)")),
}, prio=-1) # give them a little more priority to win over color buttons
self["SeekActionsPTS"].setEnabled(False)
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.doActivityTimer)
self.seekstate = self.SEEK_STATE_PLAY
self.lastseekstate = self.SEEK_STATE_PLAY
self.onPlayStateChanged = [ ]
self.lockedBecauseOfSkipping = False
self.__seekableStatusChanged()
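	# Seek states are 4-tuples: (paused flag, fast-forward multiplier,
	# slow-motion divisor, on-screen label).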
def makeStateForward(self, n):
return 0, n, 0, ">> %dx" % n
def makeStateBackward(self, n):
return 0, -n, 0, "<< %dx" % n
def makeStateSlowMotion(self, n):
return 0, 0, n, "/%d" % n
def isStateForward(self, state):
return state[1] > 1
def isStateBackward(self, state):
return state[1] < 0
def isStateSlowMotion(self, state):
return state[1] == 0 and state[2] > 1
def getHigher(self, n, lst):
for x in lst:
if x > n:
return x
return False
def getLower(self, n, lst):
lst = lst[:]
lst.reverse()
for x in lst:
if x < n:
return x
return False
def showAfterSeek(self):
if isinstance(self, InfoBarShowHide):
self.doShow()
def up(self):
pass
def down(self):
pass
def getSeek(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
seek = service.seek()
if seek is None or not seek.isCurrentlySeekable():
return None
return seek
def isSeekable(self):
if self.getSeek() is None or (isStandardInfoBar(self) and not self.timeshiftEnabled()):
return False
return True
def __seekableStatusChanged(self):
if isStandardInfoBar(self) and self.timeshiftEnabled():
pass
elif not self.isSeekable():
# print "not seekable, return to play"
self["SeekActions"].setEnabled(False)
self.setSeekState(self.SEEK_STATE_PLAY)
else:
# print "seekable"
self["SeekActions"].setEnabled(True)
self.activityTimer.start(200, False)
for c in self.onPlayStateChanged:
c(self.seekstate)
def doActivityTimer(self):
if self.isSeekable():
self.activity += 16
hdd = 1
if self.activity >= 100:
self.activity = 0
else:
self.activityTimer.stop()
self.activity = 0
hdd = 0
if os.path.exists("/proc/stb/lcd/symbol_hdd"):
file = open("/proc/stb/lcd/symbol_hdd", "w")
file.write('%d' % int(hdd))
file.close()
if os.path.exists("/proc/stb/lcd/symbol_hddprogress"):
file = open("/proc/stb/lcd/symbol_hddprogress", "w")
file.write('%d' % int(self.activity))
file.close()
def __serviceStarted(self):
self.fast_winding_hint_message_showed = False
self.setSeekState(self.SEEK_STATE_PLAY)
self.__seekableStatusChanged()
def setSeekState(self, state):
service = self.session.nav.getCurrentService()
if service is None:
return False
if not self.isSeekable():
if state not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE):
state = self.SEEK_STATE_PLAY
pauseable = service.pause()
if pauseable is None:
# print "not pauseable."
state = self.SEEK_STATE_PLAY
self.seekstate = state
if pauseable is not None:
if self.seekstate[0] and self.seekstate[3] == '||':
# print "resolved to PAUSE"
self.activityTimer.stop()
pauseable.pause()
elif self.seekstate[0] and self.seekstate[3] == 'END':
# print "resolved to STOP"
self.activityTimer.stop()
service.stop()
elif self.seekstate[1]:
if not pauseable.setFastForward(self.seekstate[1]):
pass
# print "resolved to FAST FORWARD"
else:
self.seekstate = self.SEEK_STATE_PLAY
# print "FAST FORWARD not possible: resolved to PLAY"
elif self.seekstate[2]:
if not pauseable.setSlowMotion(self.seekstate[2]):
pass
# print "resolved to SLOW MOTION"
else:
self.seekstate = self.SEEK_STATE_PAUSE
# print "SLOW MOTION not possible: resolved to PAUSE"
else:
# print "resolved to PLAY"
self.activityTimer.start(200, False)
pauseable.unpause()
for c in self.onPlayStateChanged:
c(self.seekstate)
self.checkSkipShowHideLock()
if hasattr(self, "ScreenSaverTimerStart"):
self.ScreenSaverTimerStart()
return True
def playpauseService(self):
if self.seekstate == self.SEEK_STATE_PLAY:
self.pauseService()
else:
if self.seekstate == self.SEEK_STATE_PAUSE:
if config.seek.on_pause.value == "play":
self.unPauseService()
elif config.seek.on_pause.value == "step":
self.doSeekRelative(1)
elif config.seek.on_pause.value == "last":
self.setSeekState(self.lastseekstate)
self.lastseekstate = self.SEEK_STATE_PLAY
else:
self.unPauseService()
def pauseService(self):
if self.seekstate != self.SEEK_STATE_EOF:
self.lastseekstate = self.seekstate
self.setSeekState(self.SEEK_STATE_PAUSE)
def unPauseService(self):
if self.seekstate == self.SEEK_STATE_PLAY:
return 0
self.setSeekState(self.SEEK_STATE_PLAY)
def doSeek(self, pts):
seekable = self.getSeek()
if seekable is None:
return
seekable.seekTo(pts)
def doSeekRelative(self, pts):
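		# pts is a relative offset in 90 kHz PTS ticks (90000 per second);
		# leaving EOF restores the previous pause/play state before seeking.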
seekable = self.getSeek()
if seekable is None:
return
prevstate = self.seekstate
if self.seekstate == self.SEEK_STATE_EOF:
if prevstate == self.SEEK_STATE_PAUSE:
self.setSeekState(self.SEEK_STATE_PAUSE)
else:
self.setSeekState(self.SEEK_STATE_PLAY)
seekable.seekRelative(pts<0 and -1 or 1, abs(pts))
if abs(pts) > 100 and config.usage.show_infobar_on_skip.value:
self.showAfterSeek()
def seekFwd(self):
seek = self.getSeek()
if seek and not (seek.isCurrentlySeekable() & 2):
if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
self.fast_winding_hint_message_showed = True
return
			return 0 # treat as unhandled action
if self.seekstate == self.SEEK_STATE_PLAY:
self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
elif self.seekstate == self.SEEK_STATE_PAUSE:
if len(config.seek.speeds_slowmotion.value):
self.setSeekState(self.makeStateSlowMotion(config.seek.speeds_slowmotion.value[-1]))
else:
self.setSeekState(self.makeStateForward(int(config.seek.enter_forward.value)))
elif self.seekstate == self.SEEK_STATE_EOF:
pass
elif self.isStateForward(self.seekstate):
speed = self.seekstate[1]
if self.seekstate[2]:
speed /= self.seekstate[2]
speed = self.getHigher(speed, config.seek.speeds_forward.value) or config.seek.speeds_forward.value[-1]
self.setSeekState(self.makeStateForward(speed))
elif self.isStateBackward(self.seekstate):
speed = -self.seekstate[1]
if self.seekstate[2]:
speed /= self.seekstate[2]
speed = self.getLower(speed, config.seek.speeds_backward.value)
if speed:
self.setSeekState(self.makeStateBackward(speed))
else:
self.setSeekState(self.SEEK_STATE_PLAY)
elif self.isStateSlowMotion(self.seekstate):
speed = self.getLower(self.seekstate[2], config.seek.speeds_slowmotion.value) or config.seek.speeds_slowmotion.value[0]
self.setSeekState(self.makeStateSlowMotion(speed))
def seekBack(self):
seek = self.getSeek()
if seek and not (seek.isCurrentlySeekable() & 2):
if not self.fast_winding_hint_message_showed and (seek.isCurrentlySeekable() & 1):
self.session.open(MessageBox, _("No fast winding possible yet.. but you can use the number buttons to skip forward/backward!"), MessageBox.TYPE_INFO, timeout=10)
self.fast_winding_hint_message_showed = True
return
			return 0 # treat as unhandled action
seekstate = self.seekstate
if seekstate == self.SEEK_STATE_PLAY:
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
elif seekstate == self.SEEK_STATE_EOF:
self.setSeekState(self.makeStateBackward(int(config.seek.enter_backward.value)))
self.doSeekRelative(-6)
elif seekstate == self.SEEK_STATE_PAUSE:
self.doSeekRelative(-1)
elif self.isStateForward(seekstate):
speed = seekstate[1]
if seekstate[2]:
speed /= seekstate[2]
speed = self.getLower(speed, config.seek.speeds_forward.value)
if speed:
self.setSeekState(self.makeStateForward(speed))
else:
self.setSeekState(self.SEEK_STATE_PLAY)
elif self.isStateBackward(seekstate):
speed = -seekstate[1]
if seekstate[2]:
speed /= seekstate[2]
speed = self.getHigher(speed, config.seek.speeds_backward.value) or config.seek.speeds_backward.value[-1]
self.setSeekState(self.makeStateBackward(speed))
elif self.isStateSlowMotion(seekstate):
speed = self.getHigher(seekstate[2], config.seek.speeds_slowmotion.value)
if speed:
self.setSeekState(self.makeStateSlowMotion(speed))
else:
self.setSeekState(self.SEEK_STATE_PAUSE)
self.pts_lastseekspeed = self.seekstate[1]
def seekFwdManual(self, fwd=True):
if config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
def seekBackManual(self, fwd=False):
if config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
def seekFwdSeekbar(self, fwd=True):
if not config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.fwdSeekTo, MinuteInput)
def fwdSeekTo(self, minutes):
self.doSeekRelative(minutes * 60 * 90000)
def seekBackSeekbar(self, fwd=False):
if not config.seek.baractivation.value == "leftright":
self.session.open(Seekbar, fwd)
else:
self.session.openWithCallback(self.rwdSeekTo, MinuteInput)
def rwdSeekTo(self, minutes):
# print "rwdSeekTo"
self.doSeekRelative(-minutes * 60 * 90000)
def checkSkipShowHideLock(self):
if self.seekstate == self.SEEK_STATE_PLAY or self.seekstate == self.SEEK_STATE_EOF:
self.lockedBecauseOfSkipping = False
self.unlockShow()
else:
wantlock = self.seekstate != self.SEEK_STATE_PLAY
if config.usage.show_infobar_on_skip.value:
if self.lockedBecauseOfSkipping and not wantlock:
self.unlockShow()
self.lockedBecauseOfSkipping = False
if wantlock and not self.lockedBecauseOfSkipping:
self.lockShow()
self.lockedBecauseOfSkipping = True
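	# Remaining time in milliseconds: the PTS delta is divided by 90 to
	# convert 90 kHz ticks to ms, scaled by the current trick-play speed.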
	def calcRemainingTime(self):
		seekable = self.getSeek()
		if seekable is not None:
			length = seekable.getLength()
			try:
				tmp = self.cueGetEndCutPosition()
				if tmp:
					length = (False, tmp)
			except:
				pass
			pos = seekable.getPlayPosition()
			speednom = self.seekstate[1] or 1
			speedden = self.seekstate[2] or 1
			if not length[0] and not pos[0]:
				if length[1] <= pos[1]:
					return 0
				time = (length[1] - pos[1])*speedden/(90*speednom)
				return time
			return False
def __evEOF(self):
if self.seekstate == self.SEEK_STATE_EOF:
return
# if we are seeking forward, we try to end up ~1s before the end, and pause there.
seekstate = self.seekstate
if self.seekstate != self.SEEK_STATE_PAUSE:
self.setSeekState(self.SEEK_STATE_EOF)
if seekstate not in (self.SEEK_STATE_PLAY, self.SEEK_STATE_PAUSE): # if we are seeking
seekable = self.getSeek()
if seekable is not None:
seekable.seekTo(-1)
self.doEofInternal(True)
if seekstate == self.SEEK_STATE_PLAY: # regular EOF
self.doEofInternal(True)
else:
self.doEofInternal(False)
def doEofInternal(self, playing):
pass # Defined in subclasses
def __evSOF(self):
self.setSeekState(self.SEEK_STATE_PLAY)
self.doSeek(0)
class InfoBarPVRState:
def __init__(self, screen=PVRState, force_show = False):
self.onChangedEntry = [ ]
self.onPlayStateChanged.append(self.__playStateChanged)
self.pvrStateDialog = self.session.instantiateDialog(screen)
self.onShow.append(self._mayShow)
self.onHide.append(self.pvrStateDialog.hide)
self.force_show = force_show
def createSummary(self):
return InfoBarMoviePlayerSummary
def _mayShow(self):
if self.has_key("state") and not config.usage.movieplayer_pvrstate.value:
self["state"].setText("")
self["statusicon"].setPixmapNum(6)
self["speed"].setText("")
if self.shown and self.seekstate != self.SEEK_STATE_EOF and not config.usage.movieplayer_pvrstate.value:
self.pvrStateDialog.show()
self.startHideTimer()
def __playStateChanged(self, state):
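		# Mirror the play state into the PVR state dialog; the pixmap numbers
		# follow the skin's multi-pixmap order: 0 play, 1 pause, 2 end,
		# 3 fast forward, 4 rewind, 5 slow motion (6 is the blank icon used
		# in _mayShow).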
playstateString = state[3]
state_summary = playstateString
self.pvrStateDialog["state"].setText(playstateString)
if playstateString == '>':
self.pvrStateDialog["statusicon"].setPixmapNum(0)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 0
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(0)
self["speed"].setText("")
elif playstateString == '||':
self.pvrStateDialog["statusicon"].setPixmapNum(1)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 1
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(1)
self["speed"].setText("")
elif playstateString == 'END':
self.pvrStateDialog["statusicon"].setPixmapNum(2)
self.pvrStateDialog["speed"].setText("")
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 2
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(2)
self["speed"].setText("")
elif playstateString.startswith('>>'):
speed = state[3].split()
self.pvrStateDialog["statusicon"].setPixmapNum(3)
self.pvrStateDialog["speed"].setText(speed[1])
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 3
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(3)
self["speed"].setText(speed[1])
elif playstateString.startswith('<<'):
speed = state[3].split()
self.pvrStateDialog["statusicon"].setPixmapNum(4)
self.pvrStateDialog["speed"].setText(speed[1])
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 4
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(4)
self["speed"].setText(speed[1])
elif playstateString.startswith('/'):
self.pvrStateDialog["statusicon"].setPixmapNum(5)
self.pvrStateDialog["speed"].setText(playstateString)
speed_summary = self.pvrStateDialog["speed"].text
statusicon_summary = 5
if self.has_key("state") and config.usage.movieplayer_pvrstate.value:
self["state"].setText(playstateString)
self["statusicon"].setPixmapNum(5)
self["speed"].setText(playstateString)
for cb in self.onChangedEntry:
cb(state_summary, speed_summary, statusicon_summary)
# if we return into "PLAY" state, ensure that the dialog gets hidden if there will be no infobar displayed
if not config.usage.show_infobar_on_skip.value and self.seekstate == self.SEEK_STATE_PLAY and not self.force_show:
self.pvrStateDialog.hide()
else:
self._mayShow()
class InfoBarTimeshiftState(InfoBarPVRState):
def __init__(self):
InfoBarPVRState.__init__(self, screen=TimeshiftState, force_show = True)
self.onPlayStateChanged.append(self.__timeshiftEventName)
self.onHide.append(self.__hideTimeshiftState)
def _mayShow(self):
if self.shown and self.timeshiftEnabled() and self.isSeekable():
# noinspection PyCallByClass
InfoBarTimeshift.ptsSeekPointerSetCurrentPos(self)
if config.timeshift.showinfobar.value:
self["TimeshiftSeekPointerActions"].setEnabled(True)
self.pvrStateDialog.show()
self.startHideTimer()
def __hideTimeshiftState(self):
self["TimeshiftSeekPointerActions"].setEnabled(False)
self.pvrStateDialog.hide()
def __timeshiftEventName(self,state):
if os.path.exists("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_currplaying)):
readmetafile = open("%spts_livebuffer_%s.meta" % (config.usage.timeshift_path.value,self.pts_currplaying), "r")
servicerefname = readmetafile.readline()[0:-1]
eventname = readmetafile.readline()[0:-1]
readmetafile.close()
self.pvrStateDialog["eventname"].setText(eventname)
else:
self.pvrStateDialog["eventname"].setText("")
class InfoBarShowMovies:
# i don't really like this class.
# it calls a not further specified "movie list" on up/down/movieList,
# so this is not more than an action map
def __init__(self):
self["MovieListActions"] = HelpableActionMap(self, "InfobarMovieListActions",
{
"movieList": (self.showMovies, _("Open the movie list")),
"up": (self.up, _("Open the movie list")),
"down": (self.down, _("Open the movie list"))
})
from Screens.PiPSetup import PiPSetup
class InfoBarExtensions:
EXTENSION_SINGLE = 0
EXTENSION_LIST = 1
def __init__(self):
self.list = []
if config.vixsettings.ColouredButtons.value:
self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
{
"extensions": (self.showExtensionSelection, _("Show extensions...")),
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
"openTimerList": (self.showTimerList, _("Show the list of timers.")),
"openAutoTimerList": (self.showAutoTimerList, _("Show the list of AutoTimers.")),
"openEPGSearch": (self.showEPGSearch, _("Search the epg for current event.")),
"openIMDB": (self.showIMDB, _("Search IMDb for information about current event.")),
"openDreamPlex": (self.showDreamPlex, _("Show the DreamPlex player...")),
}, 1) # lower priority
else:
self["InstantExtensionsActions"] = HelpableActionMap(self, "InfobarExtensions",
{
"extensions": (self.showExtensionSelection, _("view extensions...")),
"showPluginBrowser": (self.showPluginBrowser, _("Show the plugin browser..")),
"showDreamPlex": (self.showDreamPlex, _("Show the DreamPlex player...")),
}, 1) # lower priority
self.addExtension(extension = self.getLogManager, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getOsd3DSetup, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getCCcamInfo, type = InfoBarExtensions.EXTENSION_LIST)
self.addExtension(extension = self.getOScamInfo, type = InfoBarExtensions.EXTENSION_LIST)
def getLMname(self):
return _("Log Manager")
def getLogManager(self):
if config.logmanager.showinextensions.value:
return [((boundFunction(self.getLMname), boundFunction(self.openLogManager), lambda: True), None)]
else:
return []
def get3DSetupname(self):
return _("OSD 3D Setup")
def getOsd3DSetup(self):
		if config.osd.show3dextensions.value:
return [((boundFunction(self.get3DSetupname), boundFunction(self.open3DSetup), lambda: True), None)]
else:
return []
def getCCname(self):
return _("CCcam Info")
	def getCCcamInfo(self):
		if pathExists('/usr/softcams/'):
			softcams = os.listdir('/usr/softcams/')
			for softcam in softcams:
				if softcam.lower().startswith('cccam') and config.cccaminfo.showInExtensions.value:
					return [((boundFunction(self.getCCname), boundFunction(self.openCCcamInfo), lambda: True), None)]
		return []
def getOSname(self):
return _("OScam Info")
	def getOScamInfo(self):
		if pathExists('/usr/softcams/'):
			softcams = os.listdir('/usr/softcams/')
			for softcam in softcams:
				if softcam.lower().startswith('oscam') and config.oscaminfo.showInExtensions.value:
					return [((boundFunction(self.getOSname), boundFunction(self.openOScamInfo), lambda: True), None)]
		return []
def addExtension(self, extension, key = None, type = EXTENSION_SINGLE):
self.list.append((type, extension, key))
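	# Each list extension entry is a ((name func, action func, availability
	# func), key) tuple; updateExtension assigns a free hotkey from
	# availableKeys when no explicit key is given.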
def updateExtension(self, extension, key = None):
self.extensionsList.append(extension)
if key is not None:
if self.extensionKeys.has_key(key):
key = None
if key is None:
for x in self.availableKeys:
if not self.extensionKeys.has_key(x):
key = x
break
if key is not None:
self.extensionKeys[key] = len(self.extensionsList) - 1
def updateExtensions(self):
self.extensionsList = []
self.availableKeys = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "red", "green", "yellow", "blue" ]
self.extensionKeys = {}
for x in self.list:
if x[0] == self.EXTENSION_SINGLE:
self.updateExtension(x[1], x[2])
else:
for y in x[1]():
self.updateExtension(y[0], y[1])
def showExtensionSelection(self):
self.updateExtensions()
extensionsList = self.extensionsList[:]
keys = []
list = []
for x in self.availableKeys:
if self.extensionKeys.has_key(x):
entry = self.extensionKeys[x]
extension = self.extensionsList[entry]
if extension[2]():
name = str(extension[0]())
list.append((extension[0](), extension))
keys.append(x)
extensionsList.remove(extension)
else:
extensionsList.remove(extension)
list.extend([(x[0](), x) for x in extensionsList])
keys += [""] * len(extensionsList)
self.session.openWithCallback(self.extensionCallback, ChoiceBox, title=_("Please choose an extension..."), list = list, keys = keys, skin_name = "ExtensionsList")
def extensionCallback(self, answer):
if answer is not None:
answer[1][1]()
def showPluginBrowser(self):
from Screens.PluginBrowser import PluginBrowser
self.session.open(PluginBrowser)
def openCCcamInfo(self):
from Screens.CCcamInfo import CCcamInfoMain
self.session.open(CCcamInfoMain)
def openOScamInfo(self):
from Screens.OScamInfo import OscamInfoMenu
self.session.open(OscamInfoMenu)
def showTimerList(self):
self.session.open(TimerEditList)
def openLogManager(self):
from Screens.LogManager import LogManager
self.session.open(LogManager)
def open3DSetup(self):
from Screens.UserInterfacePositioner import OSD3DSetupScreen
self.session.open(OSD3DSetupScreen)
def showAutoTimerList(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/AutoTimer/plugin.pyo"):
from Plugins.Extensions.AutoTimer.plugin import main, autostart
from Plugins.Extensions.AutoTimer.AutoTimer import AutoTimer
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autotimer = AutoTimer()
try:
self.autotimer.readXml()
except SyntaxError as se:
self.session.open(
MessageBox,
_("Your config file is not well-formed:\n%s") % (str(se)),
type = MessageBox.TYPE_ERROR,
timeout = 10
)
return
# Do not run in background while editing, this might screw things up
if self.autopoller is not None:
self.autopoller.stop()
from Plugins.Extensions.AutoTimer.AutoTimerOverview import AutoTimerOverview
self.session.openWithCallback(
self.editCallback,
AutoTimerOverview,
self.autotimer
)
else:
self.session.open(MessageBox, _("The AutoTimer plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def editCallback(self, session):
# XXX: canceling of GUI (Overview) won't affect config values which might have been changed - is this intended?
# Don't parse EPG if editing was canceled
if session is not None:
# Save xml
self.autotimer.writeXml()
# Poll EPGCache
self.autotimer.parseEPG()
# Start autopoller again if wanted
if config.plugins.autotimer.autopoll.value:
if self.autopoller is None:
from Plugins.Extensions.AutoTimer.AutoPoller import AutoPoller
self.autopoller = AutoPoller()
self.autopoller.start()
# Remove instance if not running in background
else:
self.autopoller = None
self.autotimer = None
def showEPGSearch(self):
from Plugins.Extensions.EPGSearch.EPGSearch import EPGSearch
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
if event:
name = event and event.getEventName() or ''
else:
name = self.session.nav.getCurrentlyPlayingServiceOrGroup().toString()
name = name.split('/')
name = name[-1]
name = name.replace('.',' ')
name = name.split('-')
name = name[0]
if name.endswith(' '):
name = name[:-1]
if name:
self.session.open(EPGSearch, name, False)
else:
self.session.open(EPGSearch)
else:
self.session.open(EPGSearch)
def showIMDB(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/IMDb/plugin.pyo"):
from Plugins.Extensions.IMDb.plugin import IMDB
s = self.session.nav.getCurrentService()
if s:
info = s.info()
event = info.getEvent(0) # 0 = now, 1 = next
name = event and event.getEventName() or ''
self.session.open(IMDB, name)
else:
self.session.open(MessageBox, _("The IMDb plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
def showDreamPlex(self):
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/DreamPlex/plugin.pyo"):
from Plugins.Extensions.DreamPlex.plugin import DPS_MainMenu
self.session.open(DPS_MainMenu)
else:
self.session.open(MessageBox, _("The DreamPlex plugin is not installed!\nPlease install it."), type = MessageBox.TYPE_INFO,timeout = 10 )
from Tools.BoundFunction import boundFunction
import inspect
# depends on InfoBarExtensions
class InfoBarPlugins:
def __init__(self):
self.addExtension(extension = self.getPluginList, type = InfoBarExtensions.EXTENSION_LIST)
def getPluginName(self, name):
return name
def getPluginList(self):
l = []
for p in plugins.getPlugins(where = PluginDescriptor.WHERE_EXTENSIONSMENU):
args = inspect.getargspec(p.__call__)[0]
			if len(args) == 1 or (len(args) == 2 and isinstance(self, InfoBarChannelSelection)):
l.append(((boundFunction(self.getPluginName, p.name), boundFunction(self.runPlugin, p), lambda: True), None, p.name))
l.sort(key = lambda e: e[2]) # sort by name
return l
def runPlugin(self, plugin):
if isinstance(self, InfoBarChannelSelection):
plugin(session = self.session, servicelist = self.servicelist)
else:
plugin(session = self.session)
from Components.Task import job_manager
class InfoBarJobman:
def __init__(self):
self.addExtension(extension = self.getJobList, type = InfoBarExtensions.EXTENSION_LIST)
def getJobList(self):
if config.usage.jobtaksextensions.value:
return [((boundFunction(self.getJobName, job), boundFunction(self.showJobView, job), lambda: True), None) for job in job_manager.getPendingJobs()]
else:
return []
def getJobName(self, job):
return "%s: %s (%d%%)" % (job.getStatustext(), job.name, int(100*job.progress/float(job.end)))
def showJobView(self, job):
from Screens.TaskView import JobView
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, job)
def JobViewCB(self, in_background):
job_manager.in_background = in_background
# depends on InfoBarExtensions
class InfoBarPiP:
def __init__(self):
try:
self.session.pipshown
except:
self.session.pipshown = False
self.lastPiPService = None
if SystemInfo["PIPAvailable"] and isinstance(self, InfoBarEPG):
self["PiPActions"] = HelpableActionMap(self, "InfobarPiPActions",
{
"activatePiP": (self.activePiP, self.activePiPName),
})
if self.allowPiP:
self.addExtension((self.getShowHideName, self.showPiP, lambda: True), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.addExtension((self.getSwapName, self.swapPiP, self.pipShown), "yellow")
# self.addExtension((self.getTogglePipzapName, self.togglePipzap, self.pipShown), "red")
else:
self.addExtension((self.getShowHideName, self.showPiP, self.pipShown), "blue")
self.addExtension((self.getMoveName, self.movePiP, self.pipShown), "green")
self.lastPiPServiceTimeout = eTimer()
self.lastPiPServiceTimeout.callback.append(self.clearLastPiPService)
def pipShown(self):
return self.session.pipshown
def pipHandles0Action(self):
return self.pipShown() and config.usage.pip_zero_button.value != "standard"
def getShowHideName(self):
if self.session.pipshown:
return _("Disable Picture in Picture")
else:
return _("Activate Picture in Picture")
def getSwapName(self):
return _("Swap services")
def getMoveName(self):
return _("Move Picture in Picture")
def getTogglePipzapName(self):
slist = self.servicelist
if slist and slist.dopipzap:
return _("Zap focus to main screen")
return _("Zap focus to Picture in Picture")
def togglePipzap(self):
if not self.session.pipshown:
self.showPiP()
slist = self.servicelist
if slist and self.session.pipshown:
slist.togglePipzap()
if slist.dopipzap:
currentServicePath = slist.getCurrentServicePath()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.servicePath = currentServicePath
def showPiP(self):
if self.session.pipshown:
slist = self.servicelist
if slist and slist.dopipzap:
self.togglePipzap()
if self.session.pipshown:
self.lastPiPService = self.session.pip.getCurrentServiceReference()
self.lastPiPServiceTimeout.startLongTimer(60)
del self.session.pip
if SystemInfo["LCDMiniTVPiP"] and int(config.lcd.minitvpipmode.value) >= 1:
print '[LCDMiniTV] disable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.minitvmode.value)
f.close()
self.session.pipshown = False
else:
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.show()
newservice = self.lastPiPService or self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
if SystemInfo["LCDMiniTVPiP"] and int(config.lcd.minitvpipmode.value) >= 1:
print '[LCDMiniTV] enable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.minitvpipmode.value)
f.close()
f = open("/proc/stb/vmpeg/1/dst_width", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_height", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_apply", "w")
f.write("1")
f.close()
else:
newservice = self.session.nav.getCurrentlyPlayingServiceReference() or self.servicelist.servicelist.getCurrent()
if self.session.pip.playService(newservice):
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
if SystemInfo["LCDMiniTVPiP"] and int(config.lcd.minitvpipmode.value) >= 1:
print '[LCDMiniTV] enable PIP'
f = open("/proc/stb/lcd/mode", "w")
f.write(config.lcd.minitvpipmode.value)
f.close()
f = open("/proc/stb/vmpeg/1/dst_width", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_height", "w")
f.write("0")
f.close()
f = open("/proc/stb/vmpeg/1/dst_apply", "w")
f.write("1")
f.close()
else:
self.lastPiPService = None
self.session.pipshown = False
del self.session.pip
def clearLastPiPService(self):
self.lastPiPService = None
def activePiP(self):
		if (self.servicelist and self.servicelist.dopipzap) or not self.session.pipshown:
self.showPiP()
else:
self.togglePipzap()
def activePiPName(self):
if self.servicelist and self.servicelist.dopipzap:
return _("Disable Picture in Picture")
if self.session.pipshown:
return _("Zap focus to Picture in Picture")
else:
return _("Activate Picture in Picture")
def swapPiP(self):
swapservice = self.session.nav.getCurrentlyPlayingServiceOrGroup()
pipref = self.session.pip.getCurrentService()
if swapservice and pipref and pipref.toString() != swapservice.toString():
currentServicePath = self.servicelist.getCurrentServicePath()
self.servicelist.setCurrentServicePath(self.session.pip.servicePath, doZap=False)
self.session.pip.playService(swapservice)
self.session.nav.playService(pipref, checkParentalControl=False, adjust=False)
self.session.pip.servicePath = currentServicePath
if self.servicelist.dopipzap:
# This unfortunately won't work with subservices
self.servicelist.setCurrentSelection(self.session.pip.getCurrentService())
def movePiP(self):
self.session.open(PiPSetup, pip = self.session.pip)
def pipDoHandle0Action(self):
use = config.usage.pip_zero_button.value
if "swap" == use:
self.swapPiP()
elif "swapstop" == use:
self.swapPiP()
self.showPiP()
elif "stop" == use:
self.showPiP()
class InfoBarInstantRecord:
"""Instant Record - handles the instantRecord action in order to
start/stop instant records"""
def __init__(self):
self["InstantRecordActions"] = HelpableActionMap(self, "InfobarInstantRecord",
{
"instantRecord": (self.instantRecord, _("Instant recording...")),
})
if isStandardInfoBar(self):
self.recording = []
else:
from Screens.InfoBar import InfoBar
InfoBarInstance = InfoBar.instance
if InfoBarInstance:
self.recording = InfoBarInstance.recording
def stopCurrentRecording(self, entry = -1):
if entry is not None and entry != -1:
self.session.nav.RecordTimer.removeEntry(self.recording[entry])
self.recording.remove(self.recording[entry])
def getProgramInfoAndEvent(self, info, name):
info["serviceref"] = self.session.nav.getCurrentlyPlayingServiceOrGroup()
# try to get event info
event = None
try:
service = self.session.nav.getCurrentService()
epg = eEPGCache.getInstance()
event = epg.lookupEventTime(info["serviceref"], -1, 0)
if event is None:
event = service.info().getEvent(0)
except:
pass
info["event"] = event
info["name"] = name
info["description"] = ""
info["eventid"] = None
if event is not None:
curEvent = parseEvent(event)
info["name"] = curEvent[2]
info["description"] = curEvent[3]
info["eventid"] = curEvent[4]
info["end"] = curEvent[1]
def startInstantRecording(self, limitEvent = False):
begin = int(time())
end = begin + 3600 # dummy
name = "instant record"
info = { }
self.getProgramInfoAndEvent(info, name)
serviceref = info["serviceref"]
event = info["event"]
if event is not None:
if limitEvent:
end = info["end"]
else:
if limitEvent:
self.session.open(MessageBox, _("No event info found, recording indefinitely."), MessageBox.TYPE_INFO)
if isinstance(serviceref, eServiceReference):
serviceref = ServiceReference(serviceref)
recording = RecordTimerEntry(serviceref, begin, end, info["name"], info["description"], info["eventid"], dirname = preferredInstantRecordPath())
recording.dontSave = True
		if event is None or not limitEvent:
recording.autoincrease = True
recording.setAutoincreaseEnd()
simulTimerList = self.session.nav.RecordTimer.record(recording)
if simulTimerList is None: # no conflict
recording.autoincrease = False
self.recording.append(recording)
else:
if len(simulTimerList) > 1: # with other recording
name = simulTimerList[1].name
name_date = ' '.join((name, strftime('%F %T', localtime(simulTimerList[1].begin))))
# print "[TIMER] conflicts with", name_date
recording.autoincrease = True # start with max available length, then increment
if recording.setAutoincreaseEnd():
self.session.nav.RecordTimer.record(recording)
self.recording.append(recording)
self.session.open(MessageBox, _("Record time limited due to conflicting timer %s") % name_date, MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("Could not record due to conflicting timer %s") % name, MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("Could not record due to invalid service %s") % serviceref, MessageBox.TYPE_INFO)
recording.autoincrease = False
def isInstantRecordRunning(self):
# print "self.recording:", self.recording
if self.recording:
for x in self.recording:
if x.isRunning():
return True
return False
def recordQuestionCallback(self, answer):
# print 'recordQuestionCallback'
# print "pre:\n", self.recording
# print 'test1'
if answer is None or answer[1] == "no":
# print 'test2'
return
list = []
recording = self.recording[:]
for x in recording:
			if x not in self.session.nav.RecordTimer.timer_list:
self.recording.remove(x)
elif x.dontSave and x.isRunning():
list.append((x, False))
if answer[1] == "changeduration":
if len(self.recording) == 1:
self.changeDuration(0)
else:
self.session.openWithCallback(self.changeDuration, TimerSelection, list)
elif answer[1] == "changeendtime":
if len(self.recording) == 1:
self.setEndtime(0)
else:
self.session.openWithCallback(self.setEndtime, TimerSelection, list)
elif answer[1] == "timer":
import TimerEdit
self.session.open(TimerEdit.TimerEditList)
elif answer[1] == "stop":
self.session.openWithCallback(self.stopCurrentRecording, TimerSelection, list)
		elif answer[1] in ("indefinitely", "manualduration", "manualendtime", "event"):
			self.startInstantRecording(limitEvent = answer[1] in ("event", "manualendtime"))
if answer[1] == "manualduration":
self.changeDuration(len(self.recording)-1)
elif answer[1] == "manualendtime":
self.setEndtime(len(self.recording)-1)
elif answer[1] == "savetimeshift":
# print 'test1'
if self.isSeekable() and self.pts_eventcount != self.pts_currplaying:
# print 'test2'
# noinspection PyCallByClass
InfoBarTimeshift.SaveTimeshift(self, timeshiftfile="pts_livebuffer_%s" % self.pts_currplaying)
else:
# print 'test3'
Notifications.AddNotification(MessageBox,_("Timeshift will get saved at end of event!"), MessageBox.TYPE_INFO, timeout=5)
self.save_current_timeshift = True
config.timeshift.isRecording.value = True
elif answer[1] == "savetimeshiftEvent":
# print 'test4'
# noinspection PyCallByClass
InfoBarTimeshift.saveTimeshiftEventPopup(self)
		elif answer[1].startswith("pts_livebuffer"):
# print 'test2'
# noinspection PyCallByClass
InfoBarTimeshift.SaveTimeshift(self, timeshiftfile=answer[1])
def setEndtime(self, entry):
if entry is not None and entry >= 0:
self.selectedEntry = entry
self.endtime=ConfigClock(default = self.recording[self.selectedEntry].end)
dlg = self.session.openWithCallback(self.TimeDateInputClosed, TimeDateInput, self.endtime)
dlg.setTitle(_("Please change recording endtime"))
def TimeDateInputClosed(self, ret):
if len(ret) > 1:
if ret[0]:
# print "stopping recording at", strftime("%F %T", localtime(ret[1]))
if self.recording[self.selectedEntry].end != ret[1]:
self.recording[self.selectedEntry].autoincrease = False
self.recording[self.selectedEntry].end = ret[1]
else:
if self.recording[self.selectedEntry].end != int(time()):
self.recording[self.selectedEntry].autoincrease = False
self.recording[self.selectedEntry].end = int(time())
self.session.nav.RecordTimer.timeChanged(self.recording[self.selectedEntry])
def changeDuration(self, entry):
if entry is not None and entry >= 0:
self.selectedEntry = entry
self.session.openWithCallback(self.inputCallback, InputBox, title=_("How many minutes do you want to record?"), text="5", maxSize=False, type=Input.NUMBER)
def inputCallback(self, value):
# print "stopping recording after", int(value), "minutes."
entry = self.recording[self.selectedEntry]
if value is not None:
if int(value) != 0:
entry.autoincrease = False
entry.end = int(time()) + 60 * int(value)
else:
if entry.end != int(time()):
entry.autoincrease = False
entry.end = int(time())
self.session.nav.RecordTimer.timeChanged(entry)
def isTimerRecordRunning(self):
identical = timers = 0
for timer in self.session.nav.RecordTimer.timer_list:
if timer.isRunning() and not timer.justplay:
timers += 1
if self.recording:
for x in self.recording:
if x.isRunning() and x == timer:
identical += 1
return timers > identical
def instantRecord(self):
pirr = preferredInstantRecordPath()
if not findSafeRecordPath(pirr) and not findSafeRecordPath(defaultMoviePath()):
if not pirr:
pirr = ""
self.session.open(MessageBox, _("Missing ") + "\n" + pirr +
"\n" + _("No HDD found or HDD not initialized!"), MessageBox.TYPE_ERROR)
return
if isStandardInfoBar(self):
common = ((_("Add recording (stop after current event)"), "event"),
(_("Add recording (indefinitely)"), "indefinitely"),
(_("Add recording (enter recording duration)"), "manualduration"),
(_("Add recording (enter recording endtime)"), "manualendtime"),)
timeshiftcommon = ((_("Timeshift save recording (stop after current event)"), "savetimeshift"),
(_("Timeshift save recording (Select event)"), "savetimeshiftEvent"),)
else:
common = ()
timeshiftcommon = ()
if self.isInstantRecordRunning():
			title = _("A recording is currently running.\nWhat do you want to do?")
list = ((_("Stop recording"), "stop"),) + common + \
((_("Change recording (duration)"), "changeduration"),
(_("Change recording (endtime)"), "changeendtime"),)
if self.isTimerRecordRunning():
list += ((_("Stop timer recording"), "timer"),)
else:
title=_("Start recording?")
list = common
if self.isTimerRecordRunning():
list += ((_("Stop timer recording"), "timer"),)
if isStandardInfoBar(self) and self.timeshiftEnabled():
list = list + timeshiftcommon
if isStandardInfoBar(self):
list = list + ((_("Do not record"), "no"),)
if list:
self.session.openWithCallback(self.recordQuestionCallback, ChoiceBox,title=title,list=list)
else:
return 0
class InfoBarAudioSelection:
def __init__(self):
self["AudioSelectionAction"] = HelpableActionMap(self, "InfobarAudioSelectionActions",
{
"audioSelection": (self.audioSelection, _("Audio options...")),
"audioSelectionLong": (self.audioSelectionLong, _("Toggle Digital downmix...")),
})
def audioSelection(self):
from Screens.AudioSelection import AudioSelection
self.session.openWithCallback(self.audioSelected, AudioSelection, infobar=self)
def audioSelected(self, ret=None):
print "[infobar::audioSelected]", ret
def audioSelectionLong(self):
if SystemInfo["CanDownmixAC3"]:
if config.av.downmix_ac3.value:
				message = _("Dolby Digital downmix is now") + " " + _("disabled")
				print '[Audio] Dolby Digital downmix is now disabled'
config.av.downmix_ac3.setValue(False)
else:
config.av.downmix_ac3.setValue(True)
				message = _("Dolby Digital downmix is now") + " " + _("enabled")
				print '[Audio] Dolby Digital downmix is now enabled'
Notifications.AddPopup(text = message, type = MessageBox.TYPE_INFO, timeout = 5, id = "DDdownmixToggle")
class InfoBarSubserviceSelection:
def __init__(self):
self["SubserviceSelectionAction"] = HelpableActionMap(self, "InfobarSubserviceSelectionActions",
{
"GreenPressed": self.GreenPressed,
})
self["SubserviceQuickzapAction"] = HelpableActionMap(self, "InfobarSubserviceQuickzapActions",
{
"nextSubservice": (self.nextSubservice, _("Switch to next sub service")),
"prevSubservice": (self.prevSubservice, _("Switch to previous sub service"))
}, -1)
self["SubserviceQuickzapAction"].setEnabled(False)
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedEventInfo: self.checkSubservicesAvail
})
self.onClose.append(self.__removeNotifications)
self.bsel = None
def GreenPressed(self):
if not config.vixsettings.Subservice.value:
self.openTimerList()
else:
self.subserviceSelection()
def __removeNotifications(self):
self.session.nav.event.remove(self.checkSubservicesAvail)
def checkSubservicesAvail(self):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
if not subservices or subservices.getNumberOfSubservices() == 0:
self["SubserviceQuickzapAction"].setEnabled(False)
def nextSubservice(self):
self.changeSubservice(+1)
def prevSubservice(self):
self.changeSubservice(-1)
def changeSubservice(self, direction):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
n = subservices and subservices.getNumberOfSubservices()
if n and n > 0:
selection = -1
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
idx = 0
while idx < n:
if subservices.getSubservice(idx).toString() == ref.toString():
selection = idx
break
idx += 1
if selection != -1:
selection += direction
if selection >= n:
selection=0
elif selection < 0:
selection=n-1
newservice = subservices.getSubservice(selection)
if newservice.valid():
del subservices
del service
self.session.nav.playService(newservice, False)
def subserviceSelection(self):
service = self.session.nav.getCurrentService()
subservices = service and service.subServices()
self.bouquets = self.servicelist.getBouquetList()
n = subservices and subservices.getNumberOfSubservices()
selection = 0
if n and n > 0:
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
tlist = []
idx = 0
while idx < n:
i = subservices.getSubservice(idx)
if i.toString() == ref.toString():
selection = idx
tlist.append((i.getName(), i))
idx += 1
if self.bouquets and len(self.bouquets):
keys = ["red", "blue", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
if config.usage.multibouquet.value:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to bouquet"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
else:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), (_("Add to favourites"), "CALLFUNC", self.addSubserviceToBouquetCallback), ("--", "")] + tlist
selection += 3
else:
tlist = [(_("Quick zap"), "quickzap", service.subServices()), ("--", "")] + tlist
keys = ["red", "", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ] + [""] * n
selection += 2
self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a sub service..."), list = tlist, selection = selection, keys = keys, skin_name = "SubserviceSelection")
def subserviceSelected(self, service):
del self.bouquets
		if service is not None:
if isinstance(service[1], str):
if service[1] == "quickzap":
from Screens.SubservicesQuickzap import SubservicesQuickzap
self.session.open(SubservicesQuickzap, service[2])
else:
self["SubserviceQuickzapAction"].setEnabled(True)
self.session.nav.playService(service[1], False)
def addSubserviceToBouquetCallback(self, service):
if len(service) > 1 and isinstance(service[1], eServiceReference):
self.selectedSubservice = service
if self.bouquets is None:
cnt = 0
else:
cnt = len(self.bouquets)
if cnt > 1: # show bouquet list
self.bsel = self.session.openWithCallback(self.bouquetSelClosed, BouquetSelector, self.bouquets, self.addSubserviceToBouquet)
elif cnt == 1: # add to only one existing bouquet
self.addSubserviceToBouquet(self.bouquets[0][1])
self.session.open(MessageBox, _("Service has been added to the favourites."), MessageBox.TYPE_INFO)
def bouquetSelClosed(self, confirmed):
self.bsel = None
del self.selectedSubservice
if confirmed:
self.session.open(MessageBox, _("Service has been added to the selected bouquet."), MessageBox.TYPE_INFO)
def addSubserviceToBouquet(self, dest):
self.servicelist.addServiceToBouquet(dest, self.selectedSubservice[1])
if self.bsel:
self.bsel.close(True)
else:
del self.selectedSubservice
def openTimerList(self):
self.session.open(TimerEditList)
class InfoBarRedButton:
def __init__(self):
self["RedButtonActions"] = HelpableActionMap(self, "InfobarRedButtonActions",
{
"activateRedButton": (self.activateRedButton, _("Red button...")),
})
self.onHBBTVActivation = [ ]
self.onRedButtonActivation = [ ]
def activateRedButton(self):
service = self.session.nav.getCurrentService()
info = service and service.info()
if info and info.getInfoString(iServiceInformation.sHBBTVUrl) != "":
for x in self.onHBBTVActivation:
x()
elif False: # TODO: other red button services
for x in self.onRedButtonActivation:
x()
class InfoBarTimerButton:
def __init__(self):
self["TimerButtonActions"] = HelpableActionMap(self, "InfobarTimerButtonActions",
{
"timerSelection": (self.timerSelection, _("Timer selection...")),
})
def timerSelection(self):
from Screens.TimerEdit import TimerEditList
self.session.open(TimerEditList)
class InfoBarVmodeButton:
def __init__(self):
self["VmodeButtonActions"] = HelpableActionMap(self, "InfobarVmodeButtonActions",
{
"vmodeSelection": (self.vmodeSelection, _("Letterbox zoom")),
})
def vmodeSelection(self):
self.session.open(VideoMode)
class VideoMode(Screen):
def __init__(self,session):
Screen.__init__(self, session)
self["videomode"] = Label()
self["actions"] = NumberActionMap( [ "InfobarVmodeButtonActions" ],
{
"vmodeSelection": self.selectVMode
})
self.Timer = eTimer()
self.Timer.callback.append(self.quit)
self.selectVMode()
def selectVMode(self):
policy = config.av.policy_43
if self.isWideScreen():
policy = config.av.policy_169
idx = policy.choices.index(policy.value)
idx = (idx + 1) % len(policy.choices)
policy.value = policy.choices[idx]
self["videomode"].setText(policy.value)
self.Timer.start(1000, True)
def isWideScreen(self):
from Components.Converter.ServiceInfo import WIDESCREEN
service = self.session.nav.getCurrentService()
info = service and service.info()
return info.getInfo(iServiceInformation.sAspect) in WIDESCREEN
def quit(self):
self.Timer.stop()
self.close()
class InfoBarAdditionalInfo:
def __init__(self):
self["RecordingPossible"] = Boolean(fixed=harddiskmanager.HDDCount() > 0)
self["TimeshiftPossible"] = self["RecordingPossible"]
self["ExtensionsAvailable"] = Boolean(fixed=1)
# TODO: these properties should be queried from the input device keymap
self["ShowTimeshiftOnYellow"] = Boolean(fixed=0)
self["ShowAudioOnYellow"] = Boolean(fixed=0)
self["ShowRecordOnRed"] = Boolean(fixed=0)
class InfoBarNotifications:
def __init__(self):
self.onExecBegin.append(self.checkNotifications)
Notifications.notificationAdded.append(self.checkNotificationsIfExecing)
self.onClose.append(self.__removeNotification)
def __removeNotification(self):
Notifications.notificationAdded.remove(self.checkNotificationsIfExecing)
def checkNotificationsIfExecing(self):
if self.execing:
self.checkNotifications()
def checkNotifications(self):
notifications = Notifications.notifications
if notifications:
n = notifications[0]
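			# Each queued notification is a tuple:
			# (callback, screen class, args, kwargs, notification id).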
del notifications[0]
cb = n[0]
			if "onSessionOpenCallback" in n[3]:
n[3]["onSessionOpenCallback"]()
del n[3]["onSessionOpenCallback"]
if cb:
dlg = self.session.openWithCallback(cb, n[1], *n[2], **n[3])
elif not Notifications.current_notifications and n[4] == "ZapError":
				if "timeout" in n[3]:
del n[3]["timeout"]
n[3]["enable_input"] = False
dlg = self.session.instantiateDialog(n[1], *n[2], **n[3])
self.hide()
dlg.show()
self.notificationDialog = dlg
eActionMap.getInstance().bindAction('', -maxint - 1, self.keypressNotification)
else:
dlg = self.session.open(n[1], *n[2], **n[3])
# remember that this notification is currently active
d = (n[4], dlg)
Notifications.current_notifications.append(d)
dlg.onClose.append(boundFunction(self.__notificationClosed, d))
def closeNotificationInstantiateDialog(self):
if hasattr(self, "notificationDialog"):
self.session.deleteDialog(self.notificationDialog)
del self.notificationDialog
eActionMap.getInstance().unbindAction('', self.keypressNotification)
def keypressNotification(self, key, flag):
if flag:
self.closeNotificationInstantiateDialog()
def __notificationClosed(self, d):
Notifications.current_notifications.remove(d)
class InfoBarServiceNotifications:
def __init__(self):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evEnd: self.serviceHasEnded
})
def serviceHasEnded(self):
# print "service end!"
try:
self.setSeekState(self.SEEK_STATE_PLAY)
except:
pass
class InfoBarCueSheetSupport:
CUT_TYPE_IN = 0
CUT_TYPE_OUT = 1
CUT_TYPE_MARK = 2
CUT_TYPE_LAST = 3
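	# The cut list holds (pts, type) tuples with pts in 90 kHz ticks;
	# CUT_TYPE_LAST stores the last playback position for resume support.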
ENABLE_RESUME_SUPPORT = False
def __init__(self, actionmap = "InfobarCueSheetActions"):
self["CueSheetActions"] = HelpableActionMap(self, actionmap,
{
"jumpPreviousMark": (self.jumpPreviousMark, _("Jump to previous marked position")),
"jumpNextMark": (self.jumpNextMark, _("Jump to next marked position")),
"toggleMark": (self.toggleMark, _("Toggle a cut mark at the current position"))
}, prio=1)
self.cut_list = [ ]
self.is_closing = False
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.__serviceStarted,
iPlayableService.evCuesheetChanged: self.downloadCuesheet,
})
def __serviceStarted(self):
if self.is_closing:
return
# print "new service started! trying to download cuts!"
self.downloadCuesheet()
self.resume_point = None
if self.ENABLE_RESUME_SUPPORT:
for (pts, what) in self.cut_list:
if what == self.CUT_TYPE_LAST:
last = pts
break
else:
last = getResumePoint(self.session)
if last is None:
return
# only resume if at least 10 seconds ahead, or <10 seconds before the end.
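		# Positions are PTS ticks at 90 kHz, so 900000 ticks == 10 seconds.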
seekable = self.__getSeekable()
if seekable is None:
return # Should not happen?
length = seekable.getLength() or (None,0)
# print "seekable.getLength() returns:", length
# Hmm, this implies we don't resume if the length is unknown...
if (last > 900000) and (not length[1] or (last < length[1] - 900000)):
self.resume_point = last
l = last / 90000
if config.usage.on_movie_start.value == "ask" or not length[1]:
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Do you want to resume this playback?") + "\n" + (_("Resume position at %s") % ("%d:%02d:%02d" % (l/3600, l%3600/60, l%60))), timeout=10)
elif config.usage.on_movie_start.value == "resume":
Notifications.AddNotificationWithCallback(self.playLastCB, MessageBox, _("Resuming playback"), timeout=2, type=MessageBox.TYPE_INFO)
def playLastCB(self, answer):
		if answer is True and self.resume_point:
self.doSeek(self.resume_point)
self.hideAfterResume()
def hideAfterResume(self):
if isinstance(self, InfoBarShowHide):
self.hide()
def __getSeekable(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
return service.seek()
def cueGetCurrentPosition(self):
seek = self.__getSeekable()
if seek is None:
return None
r = seek.getPlayPosition()
if r[0]:
return None
return long(r[1])
def cueGetEndCutPosition(self):
ret = False
isin = True
for cp in self.cut_list:
if cp[1] == self.CUT_TYPE_OUT:
if isin:
isin = False
ret = cp[0]
elif cp[1] == self.CUT_TYPE_IN:
isin = True
return ret
def jumpPreviousNextMark(self, cmp, start=False):
current_pos = self.cueGetCurrentPosition()
if current_pos is None:
return False
mark = self.getNearestCutPoint(current_pos, cmp=cmp, start=start)
if mark is not None:
pts = mark[0]
else:
return False
self.doSeek(pts)
return True
def jumpPreviousMark(self):
# we add 5 seconds, so if the play position is <5s after
# the mark, the mark before will be used
self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True)
def jumpNextMark(self):
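		# Only accept marks at least one second (90000 PTS ticks) ahead of the current position.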
if not self.jumpPreviousNextMark(lambda x: x-90000):
self.doSeek(-1)
def getNearestCutPoint(self, pts, cmp=abs, start=False):
# can be optimized
beforecut = True
nearest = None
bestdiff = -1
instate = True
if start:
bestdiff = cmp(0 - pts)
if bestdiff >= 0:
nearest = [0, False]
for cp in self.cut_list:
if beforecut and cp[1] in (self.CUT_TYPE_IN, self.CUT_TYPE_OUT):
beforecut = False
if cp[1] == self.CUT_TYPE_IN: # Start is here, disregard previous marks
diff = cmp(cp[0] - pts)
if start and diff >= 0:
nearest = cp
bestdiff = diff
else:
nearest = None
bestdiff = -1
if cp[1] == self.CUT_TYPE_IN:
instate = True
elif cp[1] == self.CUT_TYPE_OUT:
instate = False
elif cp[1] in (self.CUT_TYPE_MARK, self.CUT_TYPE_LAST):
diff = cmp(cp[0] - pts)
if instate and diff >= 0 and (nearest is None or bestdiff > diff):
nearest = cp
bestdiff = diff
return nearest
def toggleMark(self, onlyremove=False, onlyadd=False, tolerance=5*90000, onlyreturn=False):
current_pos = self.cueGetCurrentPosition()
if current_pos is None:
# print "not seekable"
return
nearest_cutpoint = self.getNearestCutPoint(current_pos)
if nearest_cutpoint is not None and abs(nearest_cutpoint[0] - current_pos) < tolerance:
if onlyreturn:
return nearest_cutpoint
if not onlyadd:
self.removeMark(nearest_cutpoint)
elif not onlyremove and not onlyreturn:
self.addMark((current_pos, self.CUT_TYPE_MARK))
if onlyreturn:
return None
def addMark(self, point):
insort(self.cut_list, point)
self.uploadCuesheet()
self.showAfterCuesheetOperation()
def removeMark(self, point):
self.cut_list.remove(point)
self.uploadCuesheet()
self.showAfterCuesheetOperation()
def showAfterCuesheetOperation(self):
if isinstance(self, InfoBarShowHide):
self.doShow()
def __getCuesheet(self):
service = self.session.nav.getCurrentService()
if service is None:
return None
return service.cueSheet()
def uploadCuesheet(self):
cue = self.__getCuesheet()
if cue is None:
# print "upload failed, no cuesheet interface"
return
cue.setCutList(self.cut_list)
def downloadCuesheet(self):
cue = self.__getCuesheet()
if cue is None:
# print "download failed, no cuesheet interface"
self.cut_list = [ ]
else:
self.cut_list = cue.getCutList()
class InfoBarSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="62,46" size="82,18" font="Regular;16" >
<convert type="ClockToText">WithSeconds</convert>
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="82,18" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
<convert type="ServiceName">Name</convert>
</widget>
<widget source="session.Event_Now" render="Progress" position="6,46" size="46,18" borderWidth="1" >
<convert type="EventTime">Progress</convert>
</widget>
</screen>"""
# for picon: (path="piconlcd" will use LCD picons)
# <widget source="session.CurrentService" render="Picon" position="6,0" size="120,64" path="piconlcd" >
# <convert type="ServiceName">Reference</convert>
# </widget>
class InfoBarSummarySupport:
def __init__(self):
pass
def createSummary(self):
return InfoBarSummary
class InfoBarMoviePlayerSummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="global.CurrentTime" render="Label" position="62,46" size="64,18" font="Regular;16" halign="right" >
<convert type="ClockToText">WithSeconds</convert>
</widget>
<widget source="session.RecordState" render="FixedLabel" text=" " position="62,46" size="64,18" zPosition="1" >
<convert type="ConfigEntryTest">config.usage.blinking_display_clock_during_recording,True,CheckSourceBoolean</convert>
<convert type="ConditionalShowHide">Blink</convert>
</widget>
<widget source="session.CurrentService" render="Label" position="6,4" size="120,42" font="Regular;18" >
<convert type="ServiceName">Name</convert>
</widget>
<widget source="session.CurrentService" render="Progress" position="6,46" size="56,18" borderWidth="1" >
<convert type="ServicePosition">Position</convert>
</widget>
</screen>"""
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["state_summary"] = StaticText("")
self["speed_summary"] = StaticText("")
self["statusicon_summary"] = MultiPixmap()
self.onShow.append(self.addWatcher)
self.onHide.append(self.removeWatcher)
def addWatcher(self):
self.parent.onChangedEntry.append(self.selectionChanged)
def removeWatcher(self):
self.parent.onChangedEntry.remove(self.selectionChanged)
def selectionChanged(self, state_summary, speed_summary, statusicon_summary):
self["state_summary"].setText(state_summary)
self["speed_summary"].setText(speed_summary)
self["statusicon_summary"].setPixmapNum(int(statusicon_summary))
class InfoBarMoviePlayerSummarySupport:
def __init__(self):
pass
def createSummary(self):
return InfoBarMoviePlayerSummary
class InfoBarTeletextPlugin:
def __init__(self):
self.teletext_plugin = None
for p in plugins.getPlugins(PluginDescriptor.WHERE_TELETEXT):
self.teletext_plugin = p
if self.teletext_plugin is not None:
self["TeletextActions"] = HelpableActionMap(self, "InfobarTeletextActions",
{
"startTeletext": (self.startTeletext, _("View teletext..."))
})
else:
print "no teletext plugin found!"
def startTeletext(self):
self.teletext_plugin(session=self.session, service=self.session.nav.getCurrentService())
class InfoBarSubtitleSupport(object):
def __init__(self):
object.__init__(self)
self["SubtitleSelectionAction"] = HelpableActionMap(self, "InfobarSubtitleSelectionActions",
{
"subtitleSelection": (self.subtitleSelection, _("Subtitle selection...")),
})
self.selected_subtitle = None
if isStandardInfoBar(self):
self.subtitle_window = self.session.instantiateDialog(SubtitleDisplay)
else:
from Screens.InfoBar import InfoBar
self.subtitle_window = InfoBar.instance.subtitle_window
self.subtitle_window.hide()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evStart: self.__serviceChanged,
iPlayableService.evEnd: self.__serviceChanged,
iPlayableService.evUpdatedInfo: self.__updatedInfo
})
def getCurrentServiceSubtitle(self):
service = self.session.nav.getCurrentService()
return service and service.subtitle()
def subtitleSelection(self):
service = self.session.nav.getCurrentService()
subtitle = service and service.subtitle()
subtitlelist = subtitle and subtitle.getSubtitleList()
if self.selected_subtitle or subtitlelist and len(subtitlelist)>0:
from Screens.AudioSelection import SubtitleSelection
self.session.open(SubtitleSelection, self)
else:
return 0
def __serviceChanged(self):
if self.selected_subtitle:
self.selected_subtitle = None
self.subtitle_window.hide()
def __updatedInfo(self):
if not self.selected_subtitle:
subtitle = self.getCurrentServiceSubtitle()
cachedsubtitle = subtitle.getCachedSubtitle()
if cachedsubtitle:
self.enableSubtitle(cachedsubtitle)
def enableSubtitle(self, selectedSubtitle):
subtitle = self.getCurrentServiceSubtitle()
self.selected_subtitle = selectedSubtitle
if subtitle and self.selected_subtitle:
subtitle.enableSubtitles(self.subtitle_window.instance, self.selected_subtitle)
self.subtitle_window.show()
else:
if subtitle:
subtitle.disableSubtitles(self.subtitle_window.instance)
self.subtitle_window.hide()
def restartSubtitle(self):
if self.selected_subtitle:
self.enableSubtitle(self.selected_subtitle)
class InfoBarServiceErrorPopupSupport:
def __init__(self):
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evTuneFailed: self.__tuneFailed,
iPlayableService.evTunedIn: self.__serviceStarted,
iPlayableService.evStart: self.__serviceStarted
})
self.__serviceStarted()
def __serviceStarted(self):
self.closeNotificationInstantiateDialog()
self.last_error = None
Notifications.RemovePopup(id = "ZapError")
def __tuneFailed(self):
if not config.usage.hide_zap_errors.value:
service = self.session.nav.getCurrentService()
info = service and service.info()
error = info and info.getInfo(iServiceInformation.sDVBState)
if error == self.last_error:
error = None
else:
self.last_error = error
error = {
eDVBServicePMTHandler.eventNoResources: _("No free tuner!"),
eDVBServicePMTHandler.eventTuneFailed: _("Tune failed!"),
eDVBServicePMTHandler.eventNoPAT: _("No data on transponder!\n(Timeout reading PAT)"),
eDVBServicePMTHandler.eventNoPATEntry: _("Service not found!\n(SID not found in PAT)"),
eDVBServicePMTHandler.eventNoPMT: _("Service invalid!\n(Timeout reading PMT)"),
eDVBServicePMTHandler.eventNewProgramInfo: None,
eDVBServicePMTHandler.eventTuned: None,
eDVBServicePMTHandler.eventSOF: None,
eDVBServicePMTHandler.eventEOF: None,
eDVBServicePMTHandler.eventMisconfiguration: _("Service unavailable!\nCheck tuner configuration!"),
			}.get(error)  # .get returns None when the key is not in the dict
if error:
self.closeNotificationInstantiateDialog()
if hasattr(self, "dishDialog") and not self.dishDialog.dishState():
Notifications.AddPopup(text = error, type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapError")
class InfoBarZoom:
def __init__(self):
self.zoomrate=0
self.zoomin=1
self["ZoomActions"] = HelpableActionMap(self, "InfobarZoomActions",
{
"ZoomInOut":(self.ZoomInOut, _("Zoom In/Out TV...")),
"ZoomOff":(self.ZoomOff, _("Zoom Off...")),
}, prio=2)
def ZoomInOut(self):
zoomval=0
if self.zoomrate > 3:
self.zoomin = 0
elif self.zoomrate < -9:
self.zoomin = 1
if self.zoomin == 1:
self.zoomrate += 1
else:
self.zoomrate -= 1
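		# The vmpeg zoomrate node apparently encodes zoom-out levels as 10 + abs(rate).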
if self.zoomrate < 0:
zoomval=abs(self.zoomrate)+10
else:
zoomval=self.zoomrate
# print "zoomRate:", self.zoomrate
# print "zoomval:", zoomval
		f = open("/proc/stb/vmpeg/0/zoomrate", "w")
		f.write('%d' % int(zoomval))
		f.close()
def ZoomOff(self):
self.zoomrate = 0
self.zoomin = 1
f = open("/proc/stb/vmpeg/0/zoomrate", "w")
f.write(str(0))
f.close()
class InfoBarHdmi:
def __init__(self):
self.hdmi_enabled_full = False
self.hdmi_enabled_pip = False
if getMachineBuild() in ('inihdp', 'hd2400'):
if not self.hdmi_enabled_full:
self.addExtension((self.getHDMIInFullScreen, self.HDMIInFull, lambda: True), "blue")
if not self.hdmi_enabled_pip:
self.addExtension((self.getHDMIInPiPScreen, self.HDMIInPiP, lambda: True), "green")
self["HDMIActions"] = HelpableActionMap(self, "InfobarHDMIActions",
{
"HDMIin":(self.HDMIIn, _("Switch to HDMI in mode")),
"HDMIinLong":(self.HDMIInLong, _("Switch to HDMI in mode")),
}, prio=2)
def HDMIInLong(self):
if self.LongButtonPressed:
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.session.pip = self.session.instantiateDialog(PictureInPicture)
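				# Service type 8192 is the HDMI-IN input on these machines.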
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.show()
self.session.pipshown = True
else:
curref = self.session.pip.getCurrentService()
if curref and curref.type != 8192:
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.session.pipshown = False
del self.session.pip
def HDMIIn(self):
if not self.LongButtonPressed:
slist = self.servicelist
curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if curref and curref.type != 8192:
self.session.nav.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.session.nav.playService(slist.servicelist.getCurrent())
def getHDMIInFullScreen(self):
if not self.hdmi_enabled_full:
return _("Turn on HDMI-IN Full screen mode")
else:
return _("Turn off HDMI-IN Full screen mode")
def getHDMIInPiPScreen(self):
if not self.hdmi_enabled_pip:
return _("Turn on HDMI-IN PiP mode")
else:
return _("Turn off HDMI-IN PiP mode")
def HDMIInPiP(self):
if not hasattr(self.session, 'pip') and not self.session.pipshown:
self.hdmi_enabled_pip = True
self.session.pip = self.session.instantiateDialog(PictureInPicture)
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
self.session.pip.show()
self.session.pipshown = True
self.session.pip.servicePath = self.servicelist.getCurrentServicePath()
else:
curref = self.session.pip.getCurrentService()
if curref and curref.type != 8192:
self.hdmi_enabled_pip = True
self.session.pip.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.hdmi_enabled_pip = False
self.session.pipshown = False
del self.session.pip
def HDMIInFull(self):
slist = self.servicelist
curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
if curref and curref.type != 8192:
self.hdmi_enabled_full = True
self.session.nav.playService(eServiceReference('8192:0:1:0:0:0:0:0:0:0:'))
else:
self.hdmi_enabled_full = False
self.session.nav.playService(slist.servicelist.getCurrent())
| gpl-2.0 | -1,694,368,705,194,031,000 | 32.885854 | 238 | 0.715241 | false |
lyager/rpcz | python/compiler.py | 5 | 1758 | #!/usr/bin/env python
import os
import sys
from distutils import spawn
class CompilerException(Exception):
pass
def generate_proto(source, output_dir,
with_plugin='python', suffix='_pb2.py', plugin_binary=None):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
protoc = spawn.find_executable("protoc")
if protoc is None:
raise CompilerException(
"protoc not found. Make sure that it is in the path.")
output = os.path.join(
output_dir,
os.path.basename(source.replace(".proto", suffix)))
if not os.path.exists(source):
raise CompilerException("Can't find required file: " + source)
if (os.path.exists(output) and
os.path.getmtime(source) <= os.path.getmtime(output)):
print "Generated proto %s is up-to-date." % output
return
print "Generating %s" % output
protoc_command = protoc + ' -I "%s" --%s_out="%s" "%s"' % (
os.path.dirname(source), with_plugin, output_dir, source)
if plugin_binary:
if os.path.exists(plugin_binary):
protoc_command += ' --plugin=protoc-gen-%s=%s' % (with_plugin,
plugin_binary)
else:
print ("Plugin not found at '%s'. We are going to run protoc "
"anyway, and perhaps it will be able to find it in its "
"search path." % plugin_binary)
if os.system(protoc_command) != 0:
raise CompilerException(
"Error occurred while running protoc.")
else:
print "Generated source successfully."
| apache-2.0 | 3,245,843,021,717,457,400 | 34.16 | 79 | 0.590444 | false |
eerwitt/tensorflow | tensorflow/python/kernel_tests/losses_test.py | 21 | 54130 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.ops.losses import util
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
losses.absolute_difference(
self._predictions, self._predictions, weights=None)
def testAllCorrectNoLossWeight(self):
loss = losses.absolute_difference(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = losses.absolute_difference(self._labels, self._predictions)
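    # Mean absolute error: (|1-4| + |9-8| + |2-12| + |-5-8| + |-2-1| + |6-3|) / 6 = 33 / 6 = 5.5.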
with self.test_session():
self.assertAlmostEqual(5.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.absolute_difference(self._labels, self._predictions,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant((1.2, 0.0), shape=(2, 1))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
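    # Row weights broadcast over columns: 1.2 * (3 + 1 + 10) = 16.8, divided by
    # the 3 nonzero weights gives 5.6 under the default reduction.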
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(5.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(16.6, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(6.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = losses.absolute_difference(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
with self.test_session():
with self.assertRaises(ValueError):
losses.softmax_cross_entropy(labels, logits, weights=None)
def testAllCorrect(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = losses.softmax_cross_entropy(labels, logits)
self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrong(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = 2.3
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant((1.2, 3.4, 5.6))
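    # Each all-wrong example contributes a cross entropy of ~10.0, so the
    # weighted mean is (1.2 + 3.4 + 5.6) * 10.0 / 3.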
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([0, 0, 0], shape=[3])
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([1.2, 0, 0], shape=[3])
with self.test_session():
loss = losses.softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
losses.softmax_cross_entropy(labels, logits, weights=weights).eval()
def testSoftmaxLabelSmoothing(self):
with self.test_session():
# Softmax Cross Entropy Loss is:
# -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100], the log partition function becomes
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# so our cross entropy loss is:
# -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
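      # With label_smoothing L = 0.1 and n = 3 this is 400 * 0.1 / 3 ~= 13.33,
      # which is the expected_value asserted below.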
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
loss = losses.softmax_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
def testNoneWeightRaisesValueError(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]])
with self.test_session():
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(labels, logits, weights=None)
def testAllCorrectInt32Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectInt64Labels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllCorrectNonColumnLabels(self):
with self.test_session():
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([0, 1, 2])
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testAllWrongInt32Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongInt64Labels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testAllWrongNonColumnLabels(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([2, 0, 1])
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 10.0, 3)
def testNonZeroLossWithPythonScalarWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits,
constant_op.constant(weights))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWith1DTensorWeight(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = 2.3
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(
labels, logits, constant_op.constant((weights,)))
self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
def testNonZeroLossWithPlaceholderForWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = array_ops.placeholder(dtypes.float32)
with self.test_session() as sess:
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
loss_val = sess.run(loss,
feed_dict={weights: ((1.2,), (3.4,), (5.6,))})
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss_val, 3)
def testNonZeroLossWithPlaceholderForLogitsLabelsAndWeights(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 3))
labels = array_ops.placeholder(dtypes.int32, shape=(None, 1))
weights = array_ops.placeholder(dtypes.float32)
with self.test_session() as sess:
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
loss_val = sess.run(loss,
feed_dict={
logits: [[10.0, 0.0, 0.0],
[0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]],
labels: [[2], [0], [1]],
weights: ((1.2,), (3.4,), (5.6,)),
})
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss_val, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 3.4, 5.6], shape=(3, 1))
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testNonZeroLossWithColumnWeights(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([[1.2], [3.4], [5.6]])
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
def testAllWrongAllWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([0, 0, 0], shape=(3, 1))
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testSomeWeightsMissing(self):
logits = constant_op.constant([[10.0, 0.0, 0.0], [0.0, 10.0, 0.0],
[0.0, 0.0, 10.0]])
labels = constant_op.constant([[2], [0], [1]])
weights = constant_op.constant([1.2, 0, 0], shape=(3, 1))
with self.test_session():
loss = losses.sparse_softmax_cross_entropy(labels, logits, weights)
self.assertAlmostEqual(12.0, loss.eval(), 3)
def testMeasurementSpecificWeightsRaisesException(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentWeightSizeRaisesException(self):
"""The weight tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2]])
weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentLabelSizeRaisesException(self):
"""The label tensor has incorrect number of elements."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([1.2, 3.4, 5.6])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentWeightShapeRaisesException(self):
"""The weight tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0], [1], [2], [3]])
weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
with self.assertRaises(ValueError):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
def testInconsistentLabelShapeRaisesException(self):
"""The label tensor has incorrect shape."""
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0, -100.0],
[-100.0, -100.0, 100.0, -100.0],
[-100.0, -100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 1], [2, 3]])
weights = constant_op.constant(1.2)
with self.assertRaisesRegexp(ValueError, 'dimension'):
losses.sparse_softmax_cross_entropy(
labels, logits, weights=weights).eval()
class SigmoidCrossEntropyLossTest(test.TestCase):
def testAllCorrectSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 1)),
labels: np.ones((32, 1)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
weights = array_ops.ones_like(logits, dtype=dtypes.float32)
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
with self.test_session() as sess:
loss = sess.run(loss,
feed_dict={
logits: np.ones((32, 2)),
labels: np.ones((32, 2)),
})
self.assertAlmostEqual(0.313, loss, 3)
def testAllWrongSigmoid(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits, weights)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)
def testMultiCorrectSigmoid(self):
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0],
[-100.0, 100.0, 100.0]])
labels = constant_op.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
loss = losses.sigmoid_cross_entropy(labels, logits)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
with self.test_session():
self.assertAlmostEqual(loss.eval(), 0.0, 3)
def testSigmoidLabelSmoothingCorrect(self):
with self.test_session():
logits = constant_op.constant([[100.0, -100.0, -100.0]])
labels = constant_op.constant([[1, 0, 1]])
# Sigmoid cross entropy loss is:
# max(x,0) - x*z + log(1 + exp(-abs(x)))
# The new labels are:
# z' = z * (1 - L) + 0.5 L
# 1 -> 1 - 0.5 L
# 0 -> 0.5 L
# here we expect:
# 1/3 * (100 - 100 * (1 - 0.5 L) + 0
# + 0 + 100 * (0.5 L) + 0
# + 0 + 100 * (1 - 0.5 L) + 0)
# = 1/3 * (100 + 50 L)
label_smoothing = 0.1
loss = losses.sigmoid_cross_entropy(
labels, logits, label_smoothing=label_smoothing)
self.assertEquals(loss.op.name, 'sigmoid_cross_entropy_loss/value')
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(loss.eval(), expected_value, 3)
def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
with self.test_session():
label_smoothing = 0.1
sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
sigmoid_labels = constant_op.constant([[1, 0, 1]])
sigmoid_loss = losses.sigmoid_cross_entropy(
sigmoid_labels, sigmoid_logits, label_smoothing=label_smoothing)
softmax_logits = constant_op.constant(
[[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
softmax_loss = losses.softmax_cross_entropy(
softmax_labels, softmax_logits, label_smoothing=label_smoothing)
self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
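# NOTE (editor's illustrative addition, not part of the original suite): a
# hedged standalone check of the smoothed sigmoid loss derived in the comment
# inside testSigmoidLabelSmoothingCorrect above.  It evaluates the stable form
# max(x, 0) - x*z + log(1 + exp(-|x|)) with smoothed labels
# z' = z*(1 - L) + 0.5*L and should return (100 + 50*L) / 3; assumes only
# numpy, already imported as np.
def _sigmoid_label_smoothing_sanity_check(label_smoothing=0.1):
  logits = np.array([100.0, -100.0, -100.0])
  labels = np.array([1.0, 0.0, 1.0])
  smoothed = labels * (1.0 - label_smoothing) + 0.5 * label_smoothing
  per_element = (np.maximum(logits, 0.0) - logits * smoothed +
                 np.log1p(np.exp(-np.abs(logits))))
  return float(np.mean(per_element))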
class LogLossTest(test.TestCase):
def setUp(self):
predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
self._np_predictions = predictions
self._np_labels = labels
epsilon = 1e-7
self._expected_losses = np.multiply(
labels, np.log(predictions + epsilon)) + np.multiply(
1 - labels, np.log(1 - predictions + epsilon))
self._predictions = constant_op.constant(predictions)
self._labels = constant_op.constant(labels)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
losses.log_loss(self._labels, self._labels, weights=None)
def testAllCorrectNoLossWeight(self):
loss = losses.log_loss(self._labels, self._labels)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testAllCorrectNoLossWeightWithPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_labels.shape)
loss = losses.log_loss(self._labels, tf_predictions)
with self.test_session():
self.assertAlmostEqual(
0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)
def testNonZeroLoss(self):
loss = losses.log_loss(self._labels, self._predictions)
with self.test_session():
self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.log_loss(self._labels, self._predictions,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._np_predictions.shape)
weights = 2.3
loss = losses.log_loss(self._labels, tf_predictions,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
weights = 2.3
loss = losses.log_loss(self._labels, tf_predictions,
constant_op.constant(weights))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
loss, 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant((1.2, 3.4), shape=(2, 1))
expected_losses = np.multiply(
self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant((1.2, 0), shape=(2, 1))
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
weights = constant_op.constant([1.2, 0], shape=[2, 1])
expected_losses = np.multiply(self._expected_losses,
np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
(2, 3)))
loss = losses.log_loss(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)
def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
with self.test_session():
with self.assertRaises(ValueError):
losses.log_loss(self._labels, self._predictions, weights)
def testNonZeroLossWithMeasurementSpecificWeights(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = losses.log_loss(
self._labels,
self._predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)
def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
loss = losses.log_loss(
self._labels,
tf_predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
loss = losses.log_loss(
self._labels,
self._predictions,
constant_op.constant(
weights, shape=(2, 3)))
with self.test_session():
self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
expected_losses = np.multiply(self._expected_losses, weights)
tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
tf_weights = constant_op.constant(weights, shape=(2, 3))
loss = losses.log_loss(self._labels, tf_predictions, tf_weights)
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)
def testLossWithSampleSpecificWeightsAllZero(self):
tf_weights = array_ops.zeros(shape=(2, 3))
loss = losses.log_loss(self._labels, self._predictions, tf_weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
def testIncompatibleShapes(self):
with self.test_session():
logits = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = losses.hinge_loss(labels, logits).eval()
def testAllOutsideMargin(self):
with self.test_session():
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = losses.hinge_loss(labels, logits)
self.assertAllClose(loss.eval(), 0.0, atol=1e-3)
def testSomeInsideMargin(self):
with self.test_session():
logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
loss = losses.hinge_loss(labels, logits)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), 0.175, atol=1e-3)
def testSomeMisclassified(self):
with self.test_session():
logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
loss = losses.hinge_loss(labels, logits)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(loss.eval(), 0.875, atol=1e-3)
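# NOTE (editor's illustrative addition, not part of the original suite): a
# hedged reference computation for the hinge values asserted above.  Labels in
# {0, 1} are mapped to {-1, 1} and the per-example loss is
# max(0, 1 - signed_label * logit), averaged over all elements; assumes only
# numpy, already imported as np.
def _hinge_loss_reference(labels, logits):
  signed = 2.0 * np.asarray(labels, dtype=float) - 1.0
  return float(np.mean(np.maximum(0.0, 1.0 - signed * np.asarray(logits))))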
class MeanSquaredErrorTest(test.TestCase):
def setUp(self):
self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
losses.mean_squared_error(
self._predictions, self._predictions, weights=None)
def testScalar(self):
with self.test_session():
self.assertEqual(
0.0,
losses.mean_squared_error(predictions=constant_op.constant(0),
labels=constant_op.constant(0)).eval())
def testAllCorrectNoLossWeight(self):
loss = losses.mean_squared_error(self._predictions, self._predictions)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
def testNonZeroLoss(self):
loss = losses.mean_squared_error(self._labels, self._predictions)
with self.test_session():
self.assertAlmostEqual(49.5, loss.eval(), 3)
def testNonZeroLossWithPythonScalarWeight(self):
weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.mean_squared_error(self._labels, self._predictions,
constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(49.5 * weights, loss.eval(), 3)
def testNonZeroLossWithOneDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(767.8 / 6.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeights(self):
weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(587 / 5.0, loss.eval(), 3)
def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(18.0, loss.eval(), 3)
def testLossWithSampleSpecificWeightsAllZero(self):
weights = array_ops.zeros((2, 3))
loss = losses.mean_squared_error(self._labels, self._predictions, weights)
with self.test_session():
self.assertAlmostEqual(0.0, loss.eval(), 3)
class MeanPairwiseSquaredErrorTest(test.TestCase):
def setUp(self):
self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
batch_size, dims = self._labels.shape
# Compute the expected loss 'manually'.
total = np.zeros((batch_size,))
for b in range(batch_size):
for i in range(dims):
for j in range(dims):
x = self._predictions[b, i].item() - self._predictions[b, j].item()
y = self._labels[b, i].item() - self._labels[b, j].item()
diff = (x - y)
total[b] += (diff * diff)
self._expected_losses = np.divide(total, 9.0)
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
losses.mean_pairwise_squared_error(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
weights=None)
def _test_valid_weights(
self, labels, predictions, expected_loss, weights=1.0):
with self.test_session():
static_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions, labels=labels, weights=weights)
self.assertAlmostEqual(expected_loss, static_inputs_op.eval(), places=3)
predictions_placeholder = array_ops.placeholder(
dtypes.float32, shape=np.asarray(predictions.shape))
labels_placeholder = array_ops.placeholder(
dtypes.int32, shape=np.asarray(labels.shape))
weights_placeholder = array_ops.placeholder(
dtypes.float32, shape=np.asarray(weights).shape)
dynamic_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions_placeholder,
labels=labels_placeholder,
weights=weights_placeholder)
feed_dict = {
predictions_placeholder: predictions,
labels_placeholder: labels,
weights_placeholder: weights,
}
self.assertAlmostEqual(
expected_loss, dynamic_inputs_op.eval(feed_dict=feed_dict), places=3)
def testAllCorrectNoLossWeight(self):
self._test_valid_weights(
self._labels, self._labels, expected_loss=0.0)
def testNonZeroLoss(self):
self._test_valid_weights(
self._labels, self._predictions,
expected_loss=np.sum(self._expected_losses))
def testGradientWithZeroWeight(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
inputs = array_ops.ones((2, 3))
weights = variable_scope.get_variable(
'weights',
shape=[3, 4],
initializer=init_ops.truncated_normal_initializer())
predictions = math_ops.matmul(inputs, weights)
optimizer = momentum_lib.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
loss = losses.mean_pairwise_squared_error(predictions, predictions, 0)
gradients_to_variables = optimizer.compute_gradients(loss)
init_op = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
for grad, _ in gradients_to_variables:
np_grad = sess.run(grad)
self.assertFalse(np.isnan(np_grad).any())
def testNonZeroLossWithPythonScalarWeight(self):
weight = 2.3
self._test_valid_weights(
self._labels, self._predictions,
expected_loss=weight * np.sum(self._expected_losses),
weights=weight)
def testNonZeroLossWithScalarTensorWeight(self):
weights = 2.3
loss = losses.mean_pairwise_squared_error(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
weights=constant_op.constant(weights))
with self.test_session():
self.assertAlmostEqual(weights * np.sum(self._expected_losses),
loss.eval(), 3)
def testNonZeroLossWithScalarZeroWeight(self):
self._test_valid_weights(
self._labels, self._predictions, expected_loss=0.0, weights=0.0)
def test3d(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_valid_weights(
labels, predictions, expected_loss=122.22222)
def test3dWeightedScalar(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
weight = 3.0
self._test_valid_weights(
labels, predictions, expected_loss=weight * 122.22222,
weights=weight)
def _test_invalid_weights(
self, labels, predictions, weights=1.0):
expected_error_msg = 'weights can not be broadcast to values'
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.mean_pairwise_squared_error(
predictions=predictions, labels=labels, weights=weights)
# Dynamic check.
predictions_placeholder = array_ops.placeholder(dtypes.float32)
labels_placeholder = array_ops.placeholder(dtypes.int32)
weights_placeholder = array_ops.placeholder(dtypes.float32)
dynamic_inputs_op = losses.mean_pairwise_squared_error(
predictions=predictions_placeholder,
labels=labels_placeholder,
weights=weights_placeholder)
with self.test_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
dynamic_inputs_op.eval(feed_dict={
predictions_placeholder: predictions,
labels_placeholder: labels,
weights_placeholder: weights,
})
def testInvalid3dWeighted2x0(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_invalid_weights(
labels, predictions, weights=np.asarray((1.2, 3.4)))
def test3dWeighted2x3x3(self):
labels = np.array([
[[1, 9, 2], [12, 11, 10], [9, 8, 7]],
[[-5, -5, 7], [6, 5, 4], [3, 2, 1]],
])
predictions = np.array([
[[4, 8, 12], [1, 2, 3], [4, 5, 6]],
[[8, 1, 3], [7, 8, 9], [10, 11, 12]],
])
self._test_valid_weights(
# TODO(ptucker): This doesn't look right.
labels, predictions, expected_loss=9 * 122.22222,
weights=np.ones((2, 3, 3)))
def testLossWithAllZeroBatchSpecificWeights(self):
self._test_valid_weights(
self._labels, self._predictions, expected_loss=0.0,
weights=np.zeros((2, 1)))
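# NOTE (editor's illustrative addition, not part of the original suite): the
# triple loop in MeanPairwiseSquaredErrorTest.setUp above sums, per batch row,
# the squared differences between every prediction pair and the corresponding
# label pair, divided by dims**2 (9.0 here).  A hedged vectorized equivalent
# using broadcasting; assumes only numpy, already imported as np.
def _pairwise_squared_error_reference(predictions, labels):
  pred_diffs = predictions[:, :, None] - predictions[:, None, :]
  label_diffs = labels[:, :, None] - labels[:, None, :]
  dims = predictions.shape[1]
  return np.sum((pred_diffs - label_diffs) ** 2, axis=(1, 2)) / float(dims ** 2)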
class CosineDistanceLossTest(test.TestCase):
def setUp(self):
self._predictions = np.asarray([
[1, 0, 0], # Batch 1
[0, 0, -1],
[1, 0, 0], # Batch 2
[1, 0, 0],
[0, 0, -1], # Batch 3
[1, 0, 0]
]).reshape((3, 2, 3))
self._labels = np.asarray([[1, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0],
[0, 0, 1], [0, 1, 0]]).reshape((3, 2, 3))
def testValueErrorThrownWhenWeightIsNone(self):
with self.test_session():
with self.assertRaises(ValueError):
losses.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2,
weights=None)
def testAllCorrectNoWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._labels),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(0, loss.eval(), 5)
def testPartiallyCorrectWithIntegerValues(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2)
with self.test_session():
self.assertAlmostEqual(1, loss.eval(), 5)
def testPartiallyCorrectFloatingPointValues(self):
predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
tf_preds = constant_op.constant(
predictions, shape=(3, 1, 3), dtype=dtypes.float32)
tf_labels = constant_op.constant(
labels, shape=(3, 1, 3), dtype=dtypes.float32)
loss = losses.cosine_distance(tf_labels, tf_preds, dim=2)
with self.test_session():
self.assertAlmostEqual(1.0, loss.eval(), 5)
def testSampleSpecificWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=np.asarray((1, 0, 0)).reshape((3, 1, 1)))
with self.test_session():
self.assertEqual(1.0, loss.eval())
def testMeasurementSpecificWeights(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2, 1)))
with self.test_session():
self.assertEqual(3.0 / 4.0, loss.eval())
def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
tf_predictions = array_ops.placeholder(
dtypes.float32, shape=self._labels.shape)
loss = losses.cosine_distance(
predictions=tf_predictions,
labels=constant_op.constant(self._labels),
dim=2,
weights=constant_op.constant(
[1, 0, 0, 1, 1, 1], shape=(3, 2, 1)))
with self.test_session() as sess:
loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
self.assertEqual(3.0 / 4.0, loss)
def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 1, 1)))
with self.test_session():
self.assertEqual(0, loss.eval())
def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
loss = losses.cosine_distance(
predictions=constant_op.constant(self._predictions),
labels=constant_op.constant(self._labels),
dim=2,
weights=array_ops.zeros((3, 2, 1)))
with self.test_session():
self.assertEqual(0, loss.eval())
class AddLossTest(test.TestCase):
def testNoCollectLossesBatch2(self):
logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
self.assertFalse(util.get_losses())
losses.absolute_difference(logits, labels, loss_collection=None)
losses.log_loss(logits, labels, loss_collection=None)
losses.mean_squared_error(logits, labels, loss_collection=None)
losses.sigmoid_cross_entropy(logits, labels, loss_collection=None)
losses.softmax_cross_entropy(logits, labels, loss_collection=None)
self.assertFalse(util.get_losses())
class ComputeWeightedLossTest(test.TestCase):
def setUp(self):
self._shape = (3, 2, 4)
raw_losses = np.zeros(self._shape)
next_loss = 0.0
for i in range(self._shape[0]):
for j in range(self._shape[1]):
for k in range(self._shape[2]):
raw_losses[i][j][k] = next_loss
next_loss += 1.0
raw_losses.setflags(write=False)
self._raw_losses = raw_losses
self._unweighted_loss = np.mean(self._raw_losses)
def testUnweighted(self):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
raw_losses = self._raw_losses
unweighted_losses = (
losses.compute_weighted_loss(raw_losses),
losses.compute_weighted_loss(raw_losses, weights=np.ones((1, 1, 1))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((1, 1, 4))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((1, 2, 1))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((1, 2, 4))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((3, 1, 1))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((3, 1, 4))),
losses.compute_weighted_loss(raw_losses, weights=np.ones((3, 2, 1))),
losses.compute_weighted_loss(raw_losses, weights=np.ones(self._shape))
)
self.assertEqual(9, len(util.get_losses()))
with self.test_session():
for unweighted_loss in unweighted_losses:
self.assertAllClose(self._unweighted_loss, unweighted_loss.eval())
def testScalarWeight(self):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
weight = 17.0
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weight)
self.assertEqual(1, len(util.get_losses()))
with self.test_session():
self.assertAllClose(
np.mean(weight * self._raw_losses), weighted_loss.eval())
def _test_invalid_weights(self, weights):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
expected_error_msg = 'weights can not be broadcast to values'
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.compute_weighted_loss(self._raw_losses, weights=weights)
# Dynamic check.
weights_placeholder = array_ops.placeholder(dtypes.float32)
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights_placeholder)
self.assertEqual(1, len(util.get_losses()))
with self.test_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
weighted_loss.eval(feed_dict={weights_placeholder: weights})
def testInvalidWeightTooManyDims(self):
self._test_invalid_weights(np.zeros(shape=(2, 2, 2, 2)))
def testInvalidWeightMismatchedDim(self):
with ops.Graph().as_default():
raw_losses = array_ops.reshape(self._raw_losses, shape=(3, 2, 4, 1))
weights = np.ones(shape=(3, 2, 4, 2))
expected_error_msg = 'weights can not be broadcast to values'
self.assertEqual(0, len(util.get_losses()))
# Static check.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
losses.compute_weighted_loss(raw_losses, weights=weights)
# Dynamic check.
weights_placeholder = array_ops.placeholder(dtypes.float32)
weighted_loss = losses.compute_weighted_loss(
raw_losses, weights=weights_placeholder)
self.assertEqual(1, len(util.get_losses()))
with self.test_session():
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
weighted_loss.eval(feed_dict={weights_placeholder: weights})
def testInvalid3Weight(self):
self._test_invalid_weights((17.0, 5.0, 2.0))
def testInvalid3x1Weight(self):
self._test_invalid_weights(((17.0,), (5.0,), (2.0,),))
def testInvalid3x2Weight(self):
self._test_invalid_weights((
(17.0, 3.0),
(5.0, 31.0),
(2.0, 7.0),))
def testInvalid1x2Weight(self):
self._test_invalid_weights((17.0, 3.0,),)
def testInvalidScalar1DWeight(self):
self._test_invalid_weights((17.0,),)
def _test_valid_weights(self, weights):
with ops.Graph().as_default():
self.assertEqual(0, len(util.get_losses()))
weighted_loss = losses.compute_weighted_loss(
self._raw_losses, weights=weights)
self.assertEqual(1, len(util.get_losses()))
with self.test_session():
self.assertAllClose(
np.mean(weights * self._raw_losses),
weighted_loss.eval())
def test1x1x1Weight(self):
self._test_valid_weights((((17.0,),),))
def test1x2x1Weight(self):
self._test_valid_weights((((17.0,), (3.0,),),))
def test1x1x4Weight(self):
self._test_valid_weights((((17.0, 13.0, 2.0, 5.0),),))
def test3x1x1Weight(self):
self._test_valid_weights((((17.0,),), ((5.0,),), ((2.0,),),))
def test3x2x1Weight(self):
self._test_valid_weights((
((17.0,), (3.0,)),
((5.0,), (31.0,)),
((2.0,), (7.0,)),
))
def test3x1x4Weight(self):
self._test_valid_weights((
((17.0, 13.0, 2.0, 5.0),),
((5.0, 31.0, 17.0, 5.0),),
((7.0, 3.0, 11.0, 5.0),),
))
def test1x2x4Weight(self):
self._test_valid_weights(((
(17.0, 13.0, 2.0, 5.0),
(3.0, 13.0, 11.0, 2.0),
),))
def test3x2x4Weight(self):
self._test_valid_weights((
((17.0, 13.0, 2.0, 5.0), (3.0, 13.0, 11.0, 2.0),),
((5.0, 31.0, 17.0, 5.0), (13.0, 3.0, 1.0, 11.0),),
((7.0, 3.0, 11.0, 5.0), (13.0, 11.0, 1.0, 7.0),),
))
if __name__ == '__main__':
test.main()
| apache-2.0 | -8,203,578,745,061,066,000 | 40.670516 | 80 | 0.608295 | false |
leorochael/odoo | addons/subscription/__init__.py | 441 | 1076 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import subscription
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 872,003,408,679,586,200 | 42.04 | 79 | 0.610595 | false |
analyseuc3m/ANALYSE-v1 | cms/lib/xblock/tagging.py | 5 | 3903 | """
Structured Tagging based on XBlockAsides
"""
from xblock.core import XBlockAside, XBlock
from xblock.fragment import Fragment
from xblock.fields import Scope, Dict
from xmodule.x_module import STUDENT_VIEW
from xmodule.capa_module import CapaModule
from abc import ABCMeta, abstractproperty
from edxmako.shortcuts import render_to_string
from django.conf import settings
from webob import Response
from collections import OrderedDict
_ = lambda text: text
class AbstractTag(object):
"""
Abstract class for tags
"""
__metaclass__ = ABCMeta
@abstractproperty
def key(self):
"""
Subclasses must implement key
"""
raise NotImplementedError('Subclasses must implement key')
@abstractproperty
def name(self):
"""
Subclasses must implement name
"""
raise NotImplementedError('Subclasses must implement name')
@abstractproperty
def allowed_values(self):
"""
Subclasses must implement allowed_values
"""
raise NotImplementedError('Subclasses must implement allowed_values')
class DifficultyTag(AbstractTag):
"""
Particular implementation tags for difficulty
"""
@property
def key(self):
""" Identifier for the difficulty selector """
return 'difficulty_tag'
@property
def name(self):
""" Label for the difficulty selector """
return _('Difficulty')
@property
def allowed_values(self):
""" Allowed values for the difficulty selector """
return OrderedDict([('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')])
class StructuredTagsAside(XBlockAside):
"""
Aside that allows tagging blocks
"""
saved_tags = Dict(help=_("Dictionary with the available tags"),
scope=Scope.content,
default={},)
available_tags = [DifficultyTag()]
def _get_studio_resource_url(self, relative_url):
"""
Returns the Studio URL to a static resource.
"""
return settings.STATIC_URL + relative_url
@XBlockAside.aside_for(STUDENT_VIEW)
def student_view_aside(self, block, context): # pylint: disable=unused-argument
"""
Display the tag selector with specific categories and allowed values,
depending on the context.
"""
if isinstance(block, CapaModule):
tags = []
for tag in self.available_tags:
tags.append({
'key': tag.key,
'title': tag.name,
'values': tag.allowed_values,
'current_value': self.saved_tags.get(tag.key, None),
})
fragment = Fragment(render_to_string('structured_tags_block.html', {'tags': tags}))
fragment.add_javascript_url(self._get_studio_resource_url('/js/xblock_asides/structured_tags.js'))
fragment.initialize_js('StructuredTagsInit')
return fragment
else:
return Fragment(u'')
@XBlock.handler
def save_tags(self, request=None, suffix=None): # pylint: disable=unused-argument
"""
Handler to save choosen tags with connected XBlock
"""
found = False
if 'tag' not in request.params:
return Response("The required parameter 'tag' is not passed", status=400)
tag = request.params['tag'].split(':')
for av_tag in self.available_tags:
if av_tag.key == tag[0]:
if tag[1] in av_tag.allowed_values:
self.saved_tags[tag[0]] = tag[1]
found = True
elif tag[1] == '':
self.saved_tags[tag[0]] = None
found = True
if not found:
return Response("Invalid 'tag' parameter", status=400)
return Response()
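# Illustrative request sketch (editor's addition): the save_tags handler above
# expects a single 'tag' parameter of the form '<key>:<value>', e.g.
# 'difficulty_tag:easy'; an empty value such as 'difficulty_tag:' clears the
# saved tag, and anything else yields a 400 response.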
| agpl-3.0 | 1,244,296,430,862,461,000 | 29.492188 | 110 | 0.595439 | false |
hachard/Cra-Magnet | flask/lib/python3.5/site-packages/migrate/changeset/schema.py | 66 | 24237 | """
Schema module providing common schema operations.
"""
import abc
try: # Python 3
from collections import MutableMapping as DictMixin
except ImportError: # Python 2
from UserDict import DictMixin
import warnings
import six
import sqlalchemy
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
from migrate.exceptions import *
from migrate.changeset import SQLA_07, SQLA_08
from migrate.changeset import util
from migrate.changeset.databases.visitor import (get_engine_visitor,
run_single_visitor)
__all__ = [
'create_column',
'drop_column',
'alter_column',
'rename_table',
'rename_index',
'ChangesetTable',
'ChangesetColumn',
'ChangesetIndex',
'ChangesetDefaultClause',
'ColumnDelta',
]
def create_column(column, table=None, *p, **kw):
"""Create a column, given the table.
API to :meth:`ChangesetColumn.create`.
"""
if table is not None:
return table.create_column(column, *p, **kw)
return column.create(*p, **kw)
def drop_column(column, table=None, *p, **kw):
"""Drop a column, given the table.
API to :meth:`ChangesetColumn.drop`.
"""
if table is not None:
return table.drop_column(column, *p, **kw)
return column.drop(*p, **kw)
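# Illustrative usage sketch (editor's addition): ``my_table`` below stands for
# a bound ``sqlalchemy.Table`` and is hypothetical.
#
#   col = sqlalchemy.Column('notes', sqlalchemy.Text)
#   create_column(col, table=my_table)    # ALTER TABLE ... ADD COLUMN
#   drop_column('notes', table=my_table)  # ALTER TABLE ... DROP COLUMN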
def rename_table(table, name, engine=None, **kw):
"""Rename a table.
    If a Table instance is given, engine is not used.
API to :meth:`ChangesetTable.rename`.
:param table: Table to be renamed.
:param name: New name for Table.
:param engine: Engine instance.
:type table: string or Table instance
:type name: string
:type engine: obj
"""
table = _to_table(table, engine)
table.rename(name, **kw)
def rename_index(index, name, table=None, engine=None, **kw):
"""Rename an index.
    If an Index instance is given, table and engine are not used.
API to :meth:`ChangesetIndex.rename`.
:param index: Index to be renamed.
:param name: New name for index.
    :param table: Table to which the Index refers.
:param engine: Engine instance.
:type index: string or Index instance
:type name: string
:type table: string or Table instance
:type engine: obj
"""
index = _to_index(index, table, engine)
index.rename(name, **kw)
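# Illustrative usage sketch (editor's addition; the table and index names are
# hypothetical):
#
#   rename_table('users', 'customers', engine=engine)
#   rename_index('ix_users_email', 'ix_customers_email',
#                table='customers', engine=engine)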
def alter_column(*p, **k):
"""Alter a column.
This is a helper function that creates a :class:`ColumnDelta` and
runs it.
:argument column:
The name of the column to be altered or a
:class:`ChangesetColumn` column representing it.
:param table:
A :class:`~sqlalchemy.schema.Table` or table name to
for the table where the column will be changed.
:param engine:
The :class:`~sqlalchemy.engine.base.Engine` to use for table
reflection and schema alterations.
:returns: A :class:`ColumnDelta` instance representing the change.
"""
if 'table' not in k and isinstance(p[0], sqlalchemy.Column):
k['table'] = p[0].table
if 'engine' not in k:
k['engine'] = k['table'].bind
# deprecation
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
warnings.warn(
"Passing a Column object to alter_column is deprecated."
" Just pass in keyword parameters instead.",
MigrateDeprecationWarning
)
engine = k['engine']
# enough tests seem to break when metadata is always altered
# that this crutch has to be left in until they can be sorted
# out
k['alter_metadata']=True
delta = ColumnDelta(*p, **k)
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, delta)
return delta
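# Illustrative usage sketch (editor's addition): ``users_table`` stands for a
# bound ``sqlalchemy.Table`` and the new type is hypothetical.
#
#   alter_column('name', table=users_table, type=sqlalchemy.String(100))
#
# When a Column object is passed instead of a name, the table and engine are
# taken from the column itself, as the code above shows.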
def _to_table(table, engine=None):
"""Return if instance of Table, else construct new with metadata"""
if isinstance(table, sqlalchemy.Table):
return table
# Given: table name, maybe an engine
meta = sqlalchemy.MetaData()
if engine is not None:
meta.bind = engine
return sqlalchemy.Table(table, meta)
def _to_index(index, table=None, engine=None):
"""Return if instance of Index, else construct new with metadata"""
if isinstance(index, sqlalchemy.Index):
return index
# Given: index name; table name required
table = _to_table(table, engine)
ret = sqlalchemy.Index(index)
ret.table = table
return ret
# Python3: if we just use:
#
# class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
# ...
#
# We get the following error:
# TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases.
#
# The complete inheritance/metaclass relationship list of ColumnDelta can be
# summarized by this following dot file:
#
# digraph test123 {
# ColumnDelta -> MutableMapping;
# MutableMapping -> Mapping;
# Mapping -> {Sized Iterable Container};
# {Sized Iterable Container} -> ABCMeta[style=dashed];
#
# ColumnDelta -> SchemaItem;
# SchemaItem -> {SchemaEventTarget Visitable};
# SchemaEventTarget -> object;
# Visitable -> {VisitableType object} [style=dashed];
# VisitableType -> type;
# }
#
# We need to use a metaclass that inherits from all the metaclasses of
# DictMixin and sqlalchemy.schema.SchemaItem. Let's call it "MyMeta".
class MyMeta(sqlalchemy.sql.visitors.VisitableType, abc.ABCMeta, object):
pass
class ColumnDelta(six.with_metaclass(MyMeta, DictMixin, sqlalchemy.schema.SchemaItem)):
"""Extracts the differences between two columns/column-parameters
May receive parameters arranged in several different ways:
* **current_column, new_column, \*p, \*\*kw**
Additional parameters can be specified to override column
differences.
* **current_column, \*p, \*\*kw**
Additional parameters alter current_column. Table name is extracted
from current_column object.
Name is changed to current_column.name from current_name,
if current_name is specified.
* **current_col_name, \*p, \*\*kw**
Table kw must specified.
    :param table: Table to which the current Column should be bound.\
If table name is given, reflection will be used.
:type table: string or Table instance
:param metadata: A :class:`MetaData` instance to store
reflected table names
:param engine: When reflecting tables, either engine or metadata must \
be specified to acquire engine object.
:type engine: :class:`Engine` instance
    :returns: :class:`ColumnDelta` instance providing access to the altered \
    attributes of `result_column` through a :func:`dict`-like interface.
* :class:`ColumnDelta`.result_column is altered column with new attributes
* :class:`ColumnDelta`.current_name is current name of column in db
"""
# Column attributes that can be altered
diff_keys = ('name', 'type', 'primary_key', 'nullable',
'server_onupdate', 'server_default', 'autoincrement')
diffs = dict()
__visit_name__ = 'column'
def __init__(self, *p, **kw):
# 'alter_metadata' is not a public api. It exists purely
# as a crutch until the tests that fail when 'alter_metadata'
# behaviour always happens can be sorted out
self.alter_metadata = kw.pop("alter_metadata", False)
self.meta = kw.pop("metadata", None)
self.engine = kw.pop("engine", None)
# Things are initialized differently depending on how many column
# parameters are given. Figure out how many and call the appropriate
# method.
if len(p) >= 1 and isinstance(p[0], sqlalchemy.Column):
# At least one column specified
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
# Two columns specified
diffs = self.compare_2_columns(*p, **kw)
else:
# Exactly one column specified
diffs = self.compare_1_column(*p, **kw)
else:
# Zero columns specified
if not len(p) or not isinstance(p[0], six.string_types):
raise ValueError("First argument must be column name")
diffs = self.compare_parameters(*p, **kw)
self.apply_diffs(diffs)
def __repr__(self):
return '<ColumnDelta altermetadata=%r, %s>' % (
self.alter_metadata,
super(ColumnDelta, self).__repr__()
)
def __getitem__(self, key):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
return getattr(self.result_column, key)
def __setitem__(self, key, value):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
setattr(self.result_column, key, value)
def __delitem__(self, key):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def keys(self):
return self.diffs.keys()
def compare_parameters(self, current_name, *p, **k):
"""Compares Column objects with reflection"""
self.table = k.pop('table')
self.result_column = self._table.c.get(current_name)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_1_column(self, col, *p, **k):
"""Compares one Column object"""
self.table = k.pop('table', None)
if self.table is None:
self.table = col.table
self.result_column = col
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_2_columns(self, old_col, new_col, *p, **k):
"""Compares two Column objects"""
self.process_column(new_col)
self.table = k.pop('table', None)
# we cannot use bool() on table in SA06
if self.table is None:
self.table = old_col.table
        if self.table is None:
            self.table = new_col.table
self.result_column = old_col
# set differences
        # leave the type out here; it is compared separately below
for key in (set(self.diff_keys) - set(('type',))):
val = getattr(new_col, key, None)
if getattr(self.result_column, key, None) != val:
k.setdefault(key, val)
# inspect types
if not self.are_column_types_eq(self.result_column.type, new_col.type):
k.setdefault('type', new_col.type)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def apply_diffs(self, diffs):
"""Populate dict and column object with new values"""
self.diffs = diffs
for key in self.diff_keys:
if key in diffs:
setattr(self.result_column, key, diffs[key])
self.process_column(self.result_column)
        # instantiate the type if a class (rather than an instance) was given
if 'type' in diffs and callable(self.result_column.type):
self.result_column.type = self.result_column.type()
# add column to the table
if self.table is not None and self.alter_metadata:
self.result_column.add_to_table(self.table)
def are_column_types_eq(self, old_type, new_type):
"""Compares two types to be equal"""
ret = old_type.__class__ == new_type.__class__
# String length is a special case
if ret and isinstance(new_type, sqlalchemy.types.String):
ret = (getattr(old_type, 'length', None) == \
getattr(new_type, 'length', None))
return ret
def _extract_parameters(self, p, k, column):
"""Extracts data from p and modifies diffs"""
p = list(p)
while len(p):
if isinstance(p[0], six.string_types):
k.setdefault('name', p.pop(0))
elif isinstance(p[0], sqlalchemy.types.TypeEngine):
k.setdefault('type', p.pop(0))
elif callable(p[0]):
p[0] = p[0]()
else:
break
if len(p):
new_col = column.copy_fixed()
new_col._init_items(*p)
k = self.compare_2_columns(column, new_col, **k)
return k
def process_column(self, column):
"""Processes default values for column"""
# XXX: this is a snippet from SA processing of positional parameters
toinit = list()
if column.server_default is not None:
if isinstance(column.server_default, sqlalchemy.FetchedValue):
toinit.append(column.server_default)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_default))
if column.server_onupdate is not None:
            if isinstance(column.server_onupdate, sqlalchemy.FetchedValue):
                toinit.append(column.server_onupdate)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_onupdate,
for_update=True))
if toinit:
column._init_items(*toinit)
def _get_table(self):
return getattr(self, '_table', None)
def _set_table(self, table):
if isinstance(table, six.string_types):
if self.alter_metadata:
if not self.meta:
raise ValueError("metadata must be specified for table"
" reflection when using alter_metadata")
meta = self.meta
if self.engine:
meta.bind = self.engine
else:
if not self.engine and not self.meta:
raise ValueError("engine or metadata must be specified"
" to reflect tables")
if not self.engine:
self.engine = self.meta.bind
meta = sqlalchemy.MetaData(bind=self.engine)
self._table = sqlalchemy.Table(table, meta, autoload=True)
elif isinstance(table, sqlalchemy.Table):
self._table = table
if not self.alter_metadata:
self._table.meta = sqlalchemy.MetaData(bind=self._table.bind)
def _get_result_column(self):
return getattr(self, '_result_column', None)
def _set_result_column(self, column):
"""Set Column to Table based on alter_metadata evaluation."""
self.process_column(column)
if not hasattr(self, 'current_name'):
self.current_name = column.name
if self.alter_metadata:
self._result_column = column
else:
self._result_column = column.copy_fixed()
table = property(_get_table, _set_table)
result_column = property(_get_result_column, _set_result_column)
class ChangesetTable(object):
"""Changeset extensions to SQLAlchemy tables."""
def create_column(self, column, *p, **kw):
"""Creates a column.
The column parameter may be a column definition or the name of
a column in this table.
API to :meth:`ChangesetColumn.create`
:param column: Column to be created
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
column = getattr(self.c, str(column))
column.create(table=self, *p, **kw)
def drop_column(self, column, *p, **kw):
"""Drop a column, given its name or definition.
API to :meth:`ChangesetColumn.drop`
        :param column: Column to be dropped
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
try:
column = getattr(self.c, str(column))
except AttributeError:
# That column isn't part of the table. We don't need
# its entire definition to drop the column, just its
# name, so create a dummy column with the same name.
column = sqlalchemy.Column(str(column), sqlalchemy.Integer())
column.drop(table=self, *p, **kw)
def rename(self, name, connection=None, **kwargs):
"""Rename this table.
:param name: New name of the table.
:type name: string
        :param connection: reuse connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
run_single_visitor(engine, visitorcallable, self, connection, **kwargs)
# Fix metadata registration
self.name = name
self.deregister()
self._set_parent(self.metadata)
def _meta_key(self):
"""Get the meta key for this table."""
return sqlalchemy.schema._get_table_key(self.name, self.schema)
def deregister(self):
"""Remove this table from its metadata"""
if SQLA_07:
self.metadata._remove_table(self.name, self.schema)
else:
key = self._meta_key()
meta = self.metadata
if key in meta.tables:
del meta.tables[key]
class ChangesetColumn(object):
"""Changeset extensions to SQLAlchemy columns."""
def alter(self, *p, **k):
"""Makes a call to :func:`alter_column` for the column this
method is called on.
"""
if 'table' not in k:
k['table'] = self.table
if 'engine' not in k:
k['engine'] = k['table'].bind
return alter_column(self, *p, **k)
def create(self, table=None, index_name=None, unique_name=None,
primary_key_name=None, populate_default=True, connection=None, **kwargs):
"""Create this column in the database.
Assumes the given table exists. ``ALTER TABLE ADD COLUMN``,
for most databases.
:param table: Table instance to create on.
:param index_name: Creates :class:`ChangesetIndex` on this column.
:param unique_name: Creates :class:\
`~migrate.changeset.constraint.UniqueConstraint` on this column.
:param primary_key_name: Creates :class:\
`~migrate.changeset.constraint.PrimaryKeyConstraint` on this column.
:param populate_default: If True, created column will be \
populated with defaults
        :param connection: reuse connection instead of creating a new one.
:type table: Table instance
:type index_name: string
:type unique_name: string
:type primary_key_name: string
:type populate_default: bool
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
:returns: self
"""
self.populate_default = populate_default
self.index_name = index_name
self.unique_name = unique_name
self.primary_key_name = primary_key_name
for cons in ('index_name', 'unique_name', 'primary_key_name'):
self._check_sanity_constraints(cons)
self.add_to_table(table)
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columngenerator')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
# TODO: reuse existing connection
if self.populate_default and self.default is not None:
stmt = table.update().values({self: engine._execute_default(self.default)})
engine.execute(stmt)
return self
def drop(self, table=None, connection=None, **kwargs):
"""Drop this column from the database, leaving its table intact.
``ALTER TABLE DROP COLUMN``, for most databases.
        :param connection: reuse connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
if table is not None:
self.table = table
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columndropper')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.remove_from_table(self.table, unset_table=False)
self.table = None
return self
def add_to_table(self, table):
if table is not None and self.table is None:
if SQLA_07:
table.append_column(self)
else:
self._set_parent(table)
def _col_name_in_constraint(self,cons,name):
return False
def remove_from_table(self, table, unset_table=True):
# TODO: remove primary keys, constraints, etc
if unset_table:
self.table = None
to_drop = set()
for index in table.indexes:
columns = []
for col in index.columns:
if col.name!=self.name:
columns.append(col)
if columns:
index.columns = columns
if SQLA_08:
index.expressions = columns
else:
to_drop.add(index)
table.indexes = table.indexes - to_drop
to_drop = set()
for cons in table.constraints:
# TODO: deal with other types of constraint
if isinstance(cons,(ForeignKeyConstraint,
UniqueConstraint)):
for col_name in cons.columns:
if not isinstance(col_name,six.string_types):
col_name = col_name.name
if self.name==col_name:
to_drop.add(cons)
table.constraints = table.constraints - to_drop
if table.c.contains_column(self):
if SQLA_07:
table._columns.remove(self)
else:
table.c.remove(self)
# TODO: this is fixed in 0.6
def copy_fixed(self, **kw):
"""Create a copy of this ``Column``, with all attributes."""
q = util.safe_quote(self)
return sqlalchemy.Column(self.name, self.type, self.default,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
quote=q,
index=self.index,
unique=self.unique,
onupdate=self.onupdate,
autoincrement=self.autoincrement,
server_default=self.server_default,
server_onupdate=self.server_onupdate,
*[c.copy(**kw) for c in self.constraints])
    def _check_sanity_constraints(self, name):
        """Check that the constraint name arguments are valid."""
obj = getattr(self, name)
if (getattr(self, name[:-5]) and not obj):
raise InvalidConstraintError("Column.create() accepts index_name,"
" primary_key_name and unique_name to generate constraints")
if not isinstance(obj, six.string_types) and obj is not None:
raise InvalidConstraintError(
"%s argument for column must be constraint name" % name)
class ChangesetIndex(object):
"""Changeset extensions to SQLAlchemy Indexes."""
__visit_name__ = 'index'
def rename(self, name, connection=None, **kwargs):
"""Change the name of an index.
:param name: New name of the Index.
:type name: string
        :param connection: reuse an existing connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.table.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.name = name
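    # Usage sketch (assumes migrate's monkey-patching is active and the
    # index is attached to a bound table):
    #
    #     idx = Index('ix_accounts_login', accounts.c.login)
    #     idx.rename('ix_accounts_login_name')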
class ChangesetDefaultClause(object):
"""Implements comparison between :class:`DefaultClause` instances"""
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.arg == other.arg:
return True
def __ne__(self, other):
return not self.__eq__(other)
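    # Note: when the arguments differ, __eq__ falls through and returns
    # None (falsy) rather than False; that is sufficient for the boolean
    # comparisons performed by the changeset visitors.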
| gpl-3.0 | -2,761,961,698,758,660,600 | 33.574893 | 94 | 0.602674 | false |
mvaled/OpenUpgrade | openerp/report/print_fnc.py | 458 | 1318 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
functions = {
'today': lambda x: time.strftime('%d/%m/%Y', time.localtime()).decode('latin1')
}
#
# TODO: call an object internal function too
#
def print_fnc(fnc, arg):
if fnc in functions:
return functions[fnc](arg)
return ''
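# Example (sketch): print_fnc('today', None) returns today's date formatted
# as dd/mm/YYYY (decoded from latin-1); unknown function names return ''.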
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,376,116,647,329,105,000 | 34.621622 | 83 | 0.610774 | false |
ohsangjin/git-core | contrib/hg-to-git/hg-to-git.py | 342 | 8074 | #!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
if sys.hexversion < 0x02030000:
# The behavior of the pickle module changed significantly in 2.3
sys.stderr.write("hg-to-git.py: requires Python 2.3 or later.\n")
sys.exit(1)
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
print """\
%s: [OPTIONS] <hgprj>
options:
-s, --gitstate=FILE: name of the state to be saved/read
for incrementals
-n, --nrepack=INT: number of changesets that will trigger
a repack (default=0, -1 to deactivate)
-v, --verbose: be verbose
required:
hgprj: name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
env = ''
elems = re.compile('(.*?)\s+<(.*)>').match(user)
if elems:
env += 'export GIT_AUTHOR_NAME="%s" ;' % elems.group(1)
env += 'export GIT_COMMITTER_NAME="%s" ;' % elems.group(1)
env += 'export GIT_AUTHOR_EMAIL="%s" ;' % elems.group(2)
env += 'export GIT_COMMITTER_EMAIL="%s" ;' % elems.group(2)
else:
env += 'export GIT_AUTHOR_NAME="%s" ;' % user
env += 'export GIT_COMMITTER_NAME="%s" ;' % user
env += 'export GIT_AUTHOR_EMAIL= ;'
env += 'export GIT_COMMITTER_EMAIL= ;'
env += 'export GIT_AUTHOR_DATE="%s" ;' % date
env += 'export GIT_COMMITTER_DATE="%s" ;' % date
return env
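# For example (hypothetical input), getgitenv('John Doe <[email protected]>', date)
# returns a shell prefix exporting GIT_AUTHOR_NAME="John Doe",
# GIT_AUTHOR_EMAIL="[email protected]" and the matching committer variables.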
#------------------------------------------------------------------------------
state = ''
opt_nrepack = 0
verbose = False
try:
opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
for o, a in opts:
if o in ('-s', '--gitstate'):
state = a
state = os.path.abspath(state)
if o in ('-n', '--nrepack'):
opt_nrepack = int(a)
if o in ('-v', '--verbose'):
verbose = True
if len(args) != 1:
raise Exception('params')
except:
usage()
sys.exit(1)
hgprj = args[0]
os.chdir(hgprj)
if state:
if os.path.exists(state):
if verbose:
print 'State does exist, reading'
f = open(state, 'r')
hgvers = pickle.load(f)
else:
print 'State does not exist, first run'
sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
sys.exit(1)
if verbose:
print 'tip is', tip
# Calculate the branches
if verbose:
print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
hgchildren[str(cset)] = ()
prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
prnts = map(lambda x: x[:x.find(':')], prnts)
if prnts[0] != '':
parent = prnts[0].strip()
else:
parent = str(cset - 1)
hgchildren[parent] += ( str(cset), )
if len(prnts) > 1:
mparent = prnts[1].strip()
hgchildren[mparent] += ( str(cset), )
else:
mparent = None
hgparents[str(cset)] = (parent, mparent)
if mparent:
# For merge changesets, take either one, preferably the 'master' branch
if hgbranch[mparent] == 'master':
hgbranch[str(cset)] = 'master'
else:
hgbranch[str(cset)] = hgbranch[parent]
else:
# Normal changesets
# For first children, take the parent branch, for the others create a new branch
if hgchildren[parent][0] == str(cset):
hgbranch[str(cset)] = hgbranch[parent]
else:
hgbranch[str(cset)] = "branch-" + str(cset)
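# At this point hgbranch maps every hg revision to a git branch name: first
# children inherit the parent's branch, later children get a synthetic
# "branch-<rev>" name, and merge changesets prefer 'master' when a parent is on it.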
if not hgvers.has_key("0"):
print 'creating repository'
os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):
# incremental, already seen
if hgvers.has_key(str(cset)):
continue
hgnewcsets += 1
# get info
log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
tag = log_data[0].strip()
date = log_data[1].strip()
user = log_data[2].strip()
parent = hgparents[str(cset)][0]
mparent = hgparents[str(cset)][1]
#get comment
(fdcomment, filecomment) = tempfile.mkstemp()
csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
os.write(fdcomment, csetcomment)
os.close(fdcomment)
print '-----------------------------------------'
print 'cset:', cset
print 'branch:', hgbranch[str(cset)]
print 'user:', user
print 'date:', date
print 'comment:', csetcomment
if parent:
print 'parent:', parent
if mparent:
print 'mparent:', mparent
if tag:
print 'tag:', tag
print '-----------------------------------------'
# checkout the parent if necessary
if cset != 0:
if hgbranch[str(cset)] == "branch-" + str(cset):
print 'creating new branch', hgbranch[str(cset)]
os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
else:
print 'checking out branch', hgbranch[str(cset)]
os.system('git checkout %s' % hgbranch[str(cset)])
# merge
if mparent:
if hgbranch[parent] == hgbranch[str(cset)]:
otherbranch = hgbranch[mparent]
else:
otherbranch = hgbranch[parent]
print 'merging', otherbranch, 'into', hgbranch[str(cset)]
os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))
# remove everything except .git and .hg directories
os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')
    # repopulate with the checked-out files
os.system('hg update -C %d' % cset)
# add new files
os.system('git ls-files -x .hg --others | git update-index --add --stdin')
# delete removed files
os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')
# commit
os.system(getgitenv(user, date) + 'git commit --allow-empty --allow-empty-message -a -F %s' % filecomment)
os.unlink(filecomment)
# tag
if tag and tag != 'tip':
os.system(getgitenv(user, date) + 'git tag %s' % tag)
# delete branch if not used anymore...
if mparent and len(hgchildren[str(cset)]):
print "Deleting unused branch:", otherbranch
os.system('git branch -d %s' % otherbranch)
# retrieve and record the version
vvv = os.popen('git show --quiet --pretty=format:%H').read()
print 'record', cset, '->', vvv
hgvers[str(cset)] = vvv
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
os.system('git repack -a -d')
# write the state for incrementals
if state:
if verbose:
print 'Writing state'
f = open(state, 'w')
pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4
| gpl-2.0 | 2,963,695,399,130,692,000 | 30.662745 | 120 | 0.574313 | false |
minhphung171093/GreenERP | openerp/tools/pdf_utils.py | 72 | 2702 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:[email protected]
manipulate pdf and fdf files. pdftk recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
field names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
# what about using 'pdftk filename dump_data_fields' and parsing the output ?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
    # write_fields()/fill_pdf() expect a mapping of field name -> value,
    # so build a dict rather than a list of tuples
    fields = dict((key, '') for key in keys)
fill_pdf(infile, outfile, fields)
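# Minimal usage sketch (hypothetical file names; requires the external
# pdftk binary on the PATH):
#
#     fill_pdf('form.pdf', 'filled.pdf', {'name': 'Alice', 'date': ''})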
| gpl-3.0 | -3,807,260,001,240,026,600 | 24.018519 | 94 | 0.599926 | false |
MattsFleaMarket/python-for-android | python3-alpha/python3-src/Lib/idlelib/SearchDialogBase.py | 55 | 4385 | from tkinter import *
class SearchDialogBase:
title = "Search Dialog"
icon = "Search"
needwrapbutton = 1
def __init__(self, root, engine):
self.root = root
self.engine = engine
self.top = None
def open(self, text, searchphrase=None):
self.text = text
if not self.top:
self.create_widgets()
else:
self.top.deiconify()
self.top.tkraise()
if searchphrase:
self.ent.delete(0,"end")
self.ent.insert("end",searchphrase)
self.ent.focus_set()
self.ent.selection_range(0, "end")
self.ent.icursor(0)
self.top.grab_set()
def close(self, event=None):
if self.top:
self.top.grab_release()
self.top.withdraw()
def create_widgets(self):
top = Toplevel(self.root)
top.bind("<Return>", self.default_command)
top.bind("<Escape>", self.close)
top.protocol("WM_DELETE_WINDOW", self.close)
top.wm_title(self.title)
top.wm_iconname(self.icon)
self.top = top
self.row = 0
self.top.grid_columnconfigure(0, pad=2, weight=0)
self.top.grid_columnconfigure(1, pad=2, minsize=100, weight=100)
self.create_entries()
self.create_option_buttons()
self.create_other_buttons()
return self.create_command_buttons()
def make_entry(self, label, var):
l = Label(self.top, text=label)
l.grid(row=self.row, column=0, sticky="nw")
e = Entry(self.top, textvariable=var, exportselection=0)
e.grid(row=self.row, column=1, sticky="nwe")
self.row = self.row + 1
return e
def make_frame(self,labeltext=None):
if labeltext:
l = Label(self.top, text=labeltext)
l.grid(row=self.row, column=0, sticky="nw")
f = Frame(self.top)
f.grid(row=self.row, column=1, columnspan=1, sticky="nwe")
self.row = self.row + 1
return f
def make_button(self, label, command, isdef=0):
b = Button(self.buttonframe,
text=label, command=command,
default=isdef and "active" or "normal")
cols,rows=self.buttonframe.grid_size()
b.grid(pady=1,row=rows,column=0,sticky="ew")
self.buttonframe.grid(rowspan=rows+1)
return b
def create_entries(self):
self.ent = self.make_entry("Find:", self.engine.patvar)
def create_option_buttons(self):
f = self.make_frame("Options")
btn = Checkbutton(f, anchor="w",
variable=self.engine.revar,
text="Regular expression")
btn.pack(side="left", fill="both")
if self.engine.isre():
btn.select()
btn = Checkbutton(f, anchor="w",
variable=self.engine.casevar,
text="Match case")
btn.pack(side="left", fill="both")
if self.engine.iscase():
btn.select()
btn = Checkbutton(f, anchor="w",
variable=self.engine.wordvar,
text="Whole word")
btn.pack(side="left", fill="both")
if self.engine.isword():
btn.select()
if self.needwrapbutton:
btn = Checkbutton(f, anchor="w",
variable=self.engine.wrapvar,
text="Wrap around")
btn.pack(side="left", fill="both")
if self.engine.iswrap():
btn.select()
def create_other_buttons(self):
f = self.make_frame("Direction")
#lbl = Label(f, text="Direction: ")
#lbl.pack(side="left")
btn = Radiobutton(f, anchor="w",
variable=self.engine.backvar, value=1,
text="Up")
btn.pack(side="left", fill="both")
if self.engine.isback():
btn.select()
btn = Radiobutton(f, anchor="w",
variable=self.engine.backvar, value=0,
text="Down")
btn.pack(side="left", fill="both")
if not self.engine.isback():
btn.select()
def create_command_buttons(self):
#
# place button frame on the right
f = self.buttonframe = Frame(self.top)
f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
b = self.make_button("close", self.close)
b.lower()
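        # Subclasses (IDLE's SearchDialog, ReplaceDialog, GrepDialog) append
        # their own command buttons here and must supply default_command();
        # a minimal sketch using the inherited engine object:
        #
        #     class FindDialog(SearchDialogBase):
        #         def default_command(self, event=None):
        #             print(self.engine.getpat())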
| apache-2.0 | -8,812,879,029,661,747,000 | 30.321429 | 72 | 0.545268 | false |
cfelton/minnesota | examples/cores/fpgalink/led/fpgalink_led.py | 1 | 2713 |
from myhdl import *
from mn.cores.usb_ext import fl_fx2
from mn.cores.usb_ext import fpgalink_fx2
def fpgalink_led(
# ~~ FX2 interface signals ~~
IFCLK, # 48 MHz clock from FX2
RST, # active low async reset
SLWR, # active low write strobe
SLRD, # active low read strobe
SLOE, # active low output enable
FDI, # input data bus
FDO, # output data bus
FDS, # data select
ADDR, # 2bit address (fifo select)
FLAGA, # not used
FLAGB, # gotroom
FLAGC, # gotdata
FLAGD, # not used
PKTEND, # submit partial (less than 512)
# ~~ peripherals interfaces ~~
LEDS # external LEDs
    ):
    """FPGALink LED demo: FPGALink channel 1 reads and writes an 8-bit
    register that drives the external LEDs; channel 0 returns 0xCE and all
    other channels return 0x55.
    """
# get the local references for the top-level
clock,reset,fx2_bus,fl_bus = fl_fx2.get_interfaces()
clock = IFCLK
reset = RST
fx2_bus.data_i = FDI
fx2_bus.data_o = FDO
fx2_bus.data_t = FDS
fx2_bus.gotdata = FLAGC
fx2_bus.gotroom = FLAGB
fx2_bus.write = SLWR
fx2_bus.read = SLRD
    # SLOE simply follows SLRD (driven in hdl_assigns below); kept as a
    # separate shadowed signal for conversion
fx2_bus.pktend = PKTEND
# instantiate the fpgalink interface
g_fli = fpgalink_fx2(clock, reset, fx2_bus, fl_bus)
# ~~~~~~
lreg = Signal(intbv(0)[8:])
f2hValid_in = fl_bus.valid_i
h2fReady_in = fl_bus.ready_i
h2fValid_out = fl_bus.valid_o
chanAddr_out = fl_bus.chan_addr
f2hData_in = fl_bus.data_i
h2fData_out = fl_bus.data_o
fifosel = fx2_bus.fifosel
@always_comb
def hdl_assigns():
ADDR.next[0] = False
ADDR.next[1] = fifosel
SLOE.next = SLRD
f2hValid_in.next = True
h2fReady_in.next = True
LEDS.next = lreg
if chanAddr_out == 0:
f2hData_in.next = 0xCE
elif chanAddr_out == 1:
f2hData_in.next = lreg
else:
f2hData_in.next = 0x55
@always_seq(clock.posedge, reset=reset)
def hdl_fl():
if h2fValid_out and chanAddr_out == 1:
lreg.next = h2fData_out
return g_fli, hdl_fl, hdl_assigns
def convert():
FDO = Signal(intbv(0)[8:])
FDI = Signal(intbv(0)[8:])
FDS = Signal(bool(0))
SLWR,SLRD,SLOE = [Signal(bool(0)) for ii in range(3)]
FLAGA,FLAGB,FLAGC,FLAGD = [Signal(bool(0)) for ii in range(4)]
ADDR = Signal(intbv(0)[2:])
IFCLK = Signal(bool(0))
RST = ResetSignal(bool(1), active=0, async=True)
LEDS = Signal(intbv(0)[8:])
PKTEND = Signal(bool(0))
toVerilog(fpgalink_led, IFCLK, RST, SLWR, SLRD, SLOE,
FDI, FDO, FDS, ADDR, FLAGA, FLAGB, FLAGC, FLAGD, PKTEND,
LEDS)
if __name__ == '__main__':
convert()
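# Running this file directly (assumes MyHDL is installed) converts the design
# with toVerilog, emitting fpgalink_led.v for use with the FPGALink toolchain.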
| gpl-3.0 | -5,899,048,860,088,826,000 | 25.861386 | 70 | 0.568743 | false |
huawei-cloud/compass | compass/hdsdiscovery/utils.py | 4 | 4810 | """Utility functions
Including functions of get/getbulk/walk/set of snmp for three versions
"""
import imp
import re
import logging
def load_module(mod_name, path, host=None, credential=None):
""" Load a module instance.
:param str mod_name: module name
:param str path: directory of the module
:param str host: switch ip address
:param str credential: credential used to access switch
"""
instance = None
try:
file, path, descr = imp.find_module(mod_name, [path])
if file:
mod = imp.load_module(mod_name, file, path, descr)
if host and credential:
instance = getattr(mod, mod.CLASS_NAME)(host, credential)
else:
instance = getattr(mod, mod.CLASS_NAME)()
except ImportError as exc:
logging.error('No such plugin : %s', mod_name)
logging.exception(exc)
finally:
return instance
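# Usage sketch (hypothetical plugin directory):
#
#     vendor = load_module('huawei', '/path/to/vendor/plugins',
#                          host='10.1.1.1', credential='public')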
def ssh_remote_execute(host, username, password, cmd, *args):
"""SSH to execute script on remote machine
:param host: ip of the remote machine
:param username: username to access the remote machine
:param password: password to access the remote machine
:param cmd: command to execute
"""
    client = None
    try:
        import paramiko
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=username, password=password)
        stdin, stdout, stderr = client.exec_command(cmd)
        return stdout.readlines()
    except ImportError as exc:
        logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed to "
                      "load module 'paramiko'; it does not exist!")
        logging.exception(exc)
        return None
    except Exception as exc:
        logging.error("[hdsdiscovery][utils][ssh_remote_execute] failed: %s",
                      cmd)
        logging.exception(exc)
        return None
    finally:
        # close only if the client was actually created (paramiko may be missing)
        if client is not None:
            client.close()
def valid_ip_format(ip_address):
    """Validate the format of an IP address."""
if not re.match(r'^((([0-2]?\d{0,2}\.){3}([0-2]?\d{0,2}))'
'|(([\da-fA-F]{1,4}:){7}([\da-fA-F]{1,4})))$',
ip_address):
# check IP's format is match ipv4 or ipv6 by regex
return False
return True
#################################################################
# Implement snmpwalk and snmpget functionality
# The structure of the returned dictionary will be tag/iid/value/type
#################################################################
AUTH_VERSIONS = {'v1': 1,
'v2c': 2,
'v3': 3}
def snmp_walk(host, credential, *args):
    """Implementation of snmpwalk functionality
:param host: switch ip
:param credential: credential to access switch
:param args: OIDs
"""
try:
import netsnmp
except ImportError:
        logging.error("Module 'netsnmp' does not exist! Please install it first")
return None
if 'Version' not in credential or 'Community' not in credential:
logging.error("[utils] missing 'Version' and 'Community' in %s",
credential)
return None
if credential['Version'] in AUTH_VERSIONS:
version = AUTH_VERSIONS[credential['Version']]
credential['Version'] = version
varbind_list = []
for arg in args:
varbind = netsnmp.Varbind(arg)
varbind_list.append(varbind)
var_list = netsnmp.VarList(*varbind_list)
    netsnmp.snmpwalk(var_list, DestHost=host, **credential)  # results populate var_list in place
result = []
for var in var_list:
response = {}
response['elem_name'] = var.tag
response['iid'] = var.iid
response['value'] = var.val
response['type'] = var.type
result.append(response)
return result
def snmp_get(host, credential, object_type):
    """Implementation of snmp get functionality
:param object_type: mib object
:param host: switch ip
:param credential: the dict of credential to access switch
"""
try:
import netsnmp
except ImportError:
        logging.error("Module 'netsnmp' does not exist! Please install it first")
return None
if 'Version' not in credential or 'Community' not in credential:
logging.error('[uitls][snmp_get] missing keywords in %s for %s',
credential, host)
return None
if credential['Version'] in AUTH_VERSIONS:
version = AUTH_VERSIONS[credential['Version']]
credential['Version'] = version
varbind = netsnmp.Varbind(object_type)
res = netsnmp.snmpget(varbind, DestHost=host, **credential)
if not res:
logging.error('no result found for %s %s', host, credential)
return None
return res[0]
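# Usage sketch (hypothetical switch address and community string):
#
#     credential = {'Version': 'v2c', 'Community': 'public'}
#     sys_name = snmp_get('192.168.1.1', credential, 'sysName.0')
#     fdb = snmp_walk('192.168.1.1', credential, 'BRIDGE-MIB::dot1dTpFdbPort')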
| apache-2.0 | -1,287,850,197,749,334,500 | 29.0625 | 79 | 0.59896 | false |
miguelparaiso/PracticaOdoo | addons/website_membership/models/product.py | 338 | 1264 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = 'product.template'
_columns = {
'website_published': fields.boolean('Available in the website', copy=False),
}
_defaults = {
'website_published': False,
}
| agpl-3.0 | -6,470,773,106,378,102,000 | 38.5 | 84 | 0.607595 | false |
google/cloudprint_logocert | _ticket.py | 1 | 5318 | """Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Model that represents the CloudJobTicket that's used to submit print jobs to the
Google Cloud Print Service /submit interface.
CloudJobTicket will provide methods to set the various fields of a job ticket:
"""
class CloudJobTicket(object):
"""Represents the print job specifications sent to the printer on
job submission."""
    def __init__(self, version = '1.0'):
        """Create an empty CloudJobTicket for the given GCP version.
Args:
version: string, gcp version
"""
self.val = {}
self.val['print'] = {}
self.val['version'] = version
def AddColorOption(self, color_type):
"""
Specify the print job's color scheme
Args:
color_type: string, STANDARD_COLOR or STANDARD_MONOCHROME
"""
self.val['print']['color'] = {'type': color_type}
def AddCopiesOption(self, num_copies):
"""
Specify the number of copies to print
Args:
num_copies: integer, number of copies to print
"""
self.val['print']['copies'] = {'copies': num_copies}
def AddDuplexOption(self, duplex_type):
"""
Specify the duplexing type of the print job
Args:
duplex_type: string, NO_DUPLEX, LONG_EDGE, or SHORT_EDGE
"""
self.val['print']['duplex'] = {'type': duplex_type}
def AddPageOrientationOption(self, orientation_type):
"""
Specify the page orientation of the print job
Args:
orientation_type: string, PORTRAIT, LANDSCAPE, or AUTO
"""
self.val['print']['page_orientation'] = {'type': orientation_type}
def AddDpiOption(self, horizontal_dpi, vertical_dpi):
"""
Specify the DPI for the print job
Args:
horizontal_dpi: integer, horizontal dpi
vertical_dpi : integer, vertical dpi
"""
self.val['print']['dpi'] = {'horizontal_dpi': horizontal_dpi,
'vertical_dpi': vertical_dpi}
def AddMarginOption(self, top, right, bottom, left):
"""
Specify the margins for the print job
Args:
top, int, top margin in microns
right, int, right margin in microns
bottom, int, bottom margin in microns
left, int, left margin in microns
"""
self.val['print']['margins'] = {'top_microns': top,
'right_microns': right,
'bottom_microns': bottom,
'left_microns': left}
def AddSizeOption(self, height_microns, width_microns):
"""
Specify the size of the print job
Args:
height_microns: integer, height in microns
width_microns : integer, width in microns
"""
self.val['print']['media_size'] = {'height_microns': height_microns,
'width_microns': width_microns}
def AddReverseOption(self):
"""
Enable the reverse print option
"""
self.val['print']['reverse_order'] = {'reverse_order': True}
def AddFitToPageOption(self, type):
"""
Specify the size of the print job
Args:
type: string, NO_FITTING, FIT_TO_PAGE, GROW_TO_PAGE, SHRINK_TO_PAGE,
or FILL_PAGE
"""
self.val['print']['fit_to_page'] = {'type': type}
def AddPageRangeOption(self, start, end = None):
"""
Specify a range of pages to print
Args:
start: integer, Beginning of the print interval (inclusive)
end : integer, The last page of the range to print (inclusive).
If not specified, all pages after 'start' are printed
"""
# If this is the first page range for this CJT, start with an empty array;
# otherwise, get the existing array
page_ranges = ([] if 'page_range' not in self.val['print'] else
self.val['print']['page_range']['interval'])
new_range = {'start': start}
if end is not None:
new_range['end']= end
page_ranges.append(new_range)
self.val['print']['page_range']= {'interval': page_ranges}
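    # Building a ticket, a short sketch (constants come from GCPConstants
    # below); the resulting cjt.val dict is what gets JSON-serialized for
    # the GCP /submit call:
    #
    #     cjt = CloudJobTicket()
    #     cjt.AddColorOption(GCPConstants.COLOR)
    #     cjt.AddCopiesOption(2)
    #     cjt.AddDuplexOption(GCPConstants.LONG_EDGE)
    #     cjt.AddPageRangeOption(1, 5)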
class GCPConstants(object):
"""A class that holds constants that are used in a GCP"""
#
# CJT (Cloud Job Ticket) constants
#
# Color scheme
MONOCHROME = 'STANDARD_MONOCHROME'
COLOR = 'STANDARD_COLOR'
# Page orientation
LANDSCAPE = 'LANDSCAPE'
PORTRAIT = 'PORTRAIT'
# Duplexing
LONG_EDGE = 'LONG_EDGE'
SHORT_EDGE = 'SHORT_EDGE'
# Page fit
NO_FIT = 'NO_FITTING'
FIT = 'FIT_TO_PAGE'
GROW = 'GROW_TO_PAGE'
SHRINK = 'SHRINK_TO_PAGE'
FILL = 'FILL_PAGE'
# A4 size in microns
A4_HEIGHT = 297000
A4_WIDTH = 210000
#
# CJS (Cloud Job State) constants
#
DRAFT = 'DRAFT'
HELD = 'HELD'
QUEUED = 'QUEUED'
IN_PROGRESS = 'IN_PROGRESS'
STOPPED = 'STOPPED'
DONE = 'DONE'
ABORTED = 'ABORTED'
| apache-2.0 | -4,244,256,495,647,302,000 | 26.842932 | 80 | 0.618842 | false |