repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
anaran/olympia | lib/misc/tests/test_log.py | 8 | 3836 |
import logging
from django.conf import settings
from nose.tools import eq_
from heka.config import client_from_dict_config
import amo.tests
import commonware.log
from lib.log_settings_base import error_fmt
cfg = {
'version': 1,
'formatters': {
'error': {
'()': commonware.log.Formatter,
'datefmt': '%H:%M:%S',
'format': ('%s: [%%(USERNAME)s][%%(REMOTE_ADDR)s] %s'
% (settings.SYSLOG_TAG, error_fmt)),
},
},
'handlers': {
'test_syslog': {
'class': 'lib.misc.admin_log.ErrorSyslogHandler',
'formatter': 'error',
},
},
'loggers': {
'test.lib.misc.logging': {
'handlers': ['test_syslog'],
'level': 'ERROR',
'propagate': False,
},
},
}
class TestHekaStdLibLogging(amo.tests.TestCase):
"""
The StdLibLoggingStream is only used for *debugging* purposes.
Some detail is lost when you write out to a StdLibLoggingStream -
specifically the logging level.
"""
def setUp(self):
HEKA_CONF = {
'encoder': 'heka.encoders.StdlibPayloadEncoder',
'stream': {
'class': 'heka.streams.logging.StdLibLoggingStream',
'logger_name': 'z.heka',
}
}
self.heka = client_from_dict_config(HEKA_CONF)
self.logger = logging.getLogger('z.heka')
"""
When logging.config.dictConfig is used to configure logging
with a 'one-shot' config dictionary, any previously
instantiated singleton loggers (ie: all old loggers not in
the new config) will be explicitly disabled.
"""
self.logger.disabled = False
self._orig_handlers = self.logger.handlers
self.handler = logging.handlers.BufferingHandler(65536)
self.logger.handlers = [self.handler]
def tearDown(self):
self.logger.handlers = self._orig_handlers
def test_oldstyle_sends_msg(self):
msg = 'an error'
self.heka.error(msg)
logrecord = self.handler.buffer[-1]
self.assertEqual(logrecord.msg, "oldstyle: %s" % msg)
eq_(logrecord.levelno, logging.ERROR)
msg = 'info'
self.heka.info(msg)
logrecord = self.handler.buffer[-1]
self.assertEqual(logrecord.msg, "oldstyle: %s" % msg)
self.assertEqual(logrecord.levelname, 'INFO')
msg = 'warn'
self.heka.warn(msg)
logrecord = self.handler.buffer[-1]
eq_(logrecord.msg, "oldstyle: %s" % msg)
eq_(logrecord.levelno, logging.WARN)
# debug shouldn't log
self.heka.debug('some debug')
eq_(logrecord, self.handler.buffer[-1])
def test_other_sends_json(self):
timer = 'footimer'
elapsed = 4
self.heka.timer_send(timer, elapsed)
logrecord = self.handler.buffer[-1]
# Note that the fact that this is a timer is lost entirely
eq_(logrecord.levelno, logging.INFO)
eq_(logrecord.msg, "timer: %s" % str(elapsed))
class TestRaven(amo.tests.TestCase):
def setUp(self):
"""
We need to set the settings.HEKA instance to use a
DebugCaptureStream so that we can inspect the sent messages.
"""
heka = settings.HEKA
HEKA_CONF = {
'logger': 'zamboni',
'stream': {'class': 'heka.streams.DebugCaptureStream'},
'encoder': 'heka.encoders.NullEncoder'
}
from heka.config import client_from_dict_config
self.heka = client_from_dict_config(HEKA_CONF, heka)
def test_send_raven(self):
try:
1 / 0
except:
self.heka.raven('blah')
eq_(len(self.heka.stream.msgs), 1)
msg = self.heka.stream.msgs[0]
eq_(msg.type, 'sentry')
| bsd-3-clause |
sloanyang/android_external_webkit | Tools/Scripts/webkitpy/common/net/testoutputset_unittest.py | 15 | 6278 |
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from webkitpy.common.system.zip_mock import MockZip
import testoutputset
import unittest
class TestOutputSetTest(unittest.TestCase):
def _outputset_with_zip(self, zip, **kwargs):
return testoutputset.TestOutputSet('<fake-outputset>', '<fake-platform>', zip, **kwargs)
def test_text_files_get_interpreted_as_text_outputs(self):
zip = MockZip()
zip.insert('fast/dom/some-test-actual.txt', 'actual outputs')
b = self._outputset_with_zip(zip)
self.assertEquals(1, len(b.outputs_for('fast/dom/some-test')))
self.assertEquals('fast/dom/some-test', b.outputs_for('fast/dom/some-test.html')[0].name())
def test_image_and_checksum_files_get_interpreted_as_a_single_image_output(self):
zip = MockZip()
zip.insert('fast/dom/some-test-actual.checksum', 'abc123')
zip.insert('fast/dom/some-test-actual.png', '<image data>')
b = self._outputset_with_zip(zip)
outputs = b.outputs_for('fast/dom/some-test')
self.assertEquals(1, len(outputs))
output = outputs[0]
self.assertEquals('image', output.type())
self.assertEquals('abc123', output.checksum())
def test_multiple_image_outputs_are_detected(self):
zip = MockZip()
zip.insert('platform/win/fast/dom/some-test-actual.checksum', 'checksum1')
zip.insert('platform/win/fast/dom/some-test-actual.png', '<image data 1>')
zip.insert('platform/mac/fast/dom/some-test-actual.checksum', 'checksum2')
zip.insert('platform/mac/fast/dom/some-test-actual.png', '<image data 2>')
b = self._outputset_with_zip(zip)
outputs = b.outputs_for('fast/dom/some-test')
self.assertEquals(2, len(outputs))
self.assertFalse(outputs[0].same_content(outputs[1]))
def test_aggregate_output_set_correctly_retrieves_tests_from_multiple_output_sets(self):
outputset1_zip = MockZip()
outputset1_zip.insert('fast/dom/test-actual.txt', 'linux text output')
outputset1 = testoutputset.TestOutputSet('linux-outputset', 'linux', outputset1_zip)
outputset2_zip = MockZip()
outputset2_zip.insert('fast/dom/test-actual.txt', 'windows text output')
outputset2 = testoutputset.TestOutputSet('win-outputset', 'win', outputset2_zip)
b = testoutputset.AggregateTestOutputSet([outputset1, outputset2])
self.assertEquals(2, len(b.outputs_for('fast/dom/test')))
def test_can_infer_platform_from_path_if_none_provided(self):
# FIXME: unclear what the right behavior on win32 is.
# https://bugs.webkit.org/show_bug.cgi?id=54525.
if sys.platform == 'win32':
return
zip = MockZip()
zip.insert('platform/win/some-test-expected.png', '<image data>')
zip.insert('platform/win/some-test-expected.checksum', 'abc123')
b = testoutputset.TestOutputSet('local LayoutTests outputset', None, zip)
outputs = b.outputs_for('some-test')
self.assertEquals(1, len(outputs))
self.assertEquals('win', outputs[0].platform())
def test_test_extension_is_ignored(self):
zip = MockZip()
zip.insert('test/test-a-actual.txt', 'actual outputs')
b = self._outputset_with_zip(zip)
outputs = b.outputs_for('test/test-a.html')
self.assertEquals(1, len(outputs))
self.assertEquals('test/test-a', outputs[0].name())
def test_existing_outputs_are_marked_as_such(self):
zip = MockZip()
zip.insert('test/test-a-expected.txt', 'expected outputs')
b = self._outputset_with_zip(zip)
outputs = b.outputs_for('test/test-a.html')
self.assertEquals(1, len(outputs))
self.assertFalse(outputs[0].is_actual())
def test_only_returns_outputs_of_specified_type(self):
zip = MockZip()
zip.insert('test/test-a-expected.txt', 'expected outputs')
zip.insert('test/test-a-expected.checksum', 'expected outputs')
zip.insert('test/test-a-expected.png', 'expected outputs')
b = self._outputset_with_zip(zip)
outputs = b.outputs_for('test/test-a.html')
text_outputs = b.outputs_for('test/test-a.html', target_type='text')
image_outputs = b.outputs_for('test/test-a.html', target_type='image')
self.assertEquals(2, len(outputs))
self.assertEquals(1, len(text_outputs))
self.assertEquals(1, len(image_outputs))
self.assertEquals('text', text_outputs[0].type())
self.assertEquals('image', image_outputs[0].type())
def test_exclude_expected_outputs_works(self):
zip = MockZip()
zip.insert('test-expected.txt', 'expected outputs stored on server for some reason')
b = self._outputset_with_zip(zip, include_expected=False)
outputs = b.outputs_for('test', target_type=None)
self.assertEquals(0, len(outputs))
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
mmnelemane/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/main.py | 13 | 1564 |
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014 Fumihiko Kakuma <kakuma at valinux co jp>
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.common import config as common_config
from neutron.common import utils as n_utils
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
_main_modules = {
'ovs-ofctl': 'neutron.plugins.ml2.drivers.openvswitch.agent.openflow.'
'ovs_ofctl.main',
}
def main():
common_config.init(sys.argv[1:])
driver_name = cfg.CONF.OVS.of_interface
mod_name = _main_modules[driver_name]
mod = importutils.import_module(mod_name)
mod.init_config()
common_config.setup_logging()
n_utils.log_opt_values(LOG)
mod.main()
| apache-2.0 |
wilsonxiao/machinekit | configs/sim/axis/orphans/pysubs/customtask.py | 28 | 20532 |
import os
import sys
import signal
import traceback
import emctask
import emccanon
import interpreter
import hal
import emc # ini only
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from userfuncs import UserFuncs
except ImportError:
from nulluserfuncs import UserFuncs
def debug():
return interpreter.this.debugmask & 0x00040000 # EMC_DEBUG_PYTHON_TASK
def handler(signum, frame):
''' controlled shut down
after this, emcIoHalt() will be called, too
'''
print "Python Task shutdown handler"
# this handler overrides the handler in emctaskmain, so call that as well
emctask.emctask_quit(signum)
class CustomTask(emctask.Task,UserFuncs):
def __init__(self):
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
try:
if debug(): print "py: CustomTask()"
emctask.Task.__init__(self)
self.inifile = emc.ini(emctask.ini_filename())
self.tcpins = int(self.inifile.find("TOOL", "TASK_TOOLCHANGE_PINS") or 0)
self.startchange_pins = int(self.inifile.find("TOOL", "TASK_START_CHANGE_PINS") or 0)
self.fault_pins = int(self.inifile.find("TOOL", "TASK_TOOLCHANGE_FAULT_PINS") or 0)
h = hal.component("iocontrol.0")
h.newpin("coolant-flood", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("coolant-mist", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("lube-level", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("lube", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("emc-enable-in", hal.HAL_BIT, hal.HAL_IN)
h.newpin("user-enable-out", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("user-request-enable", hal.HAL_BIT, hal.HAL_OUT)
if self.tcpins:
h.newpin("tool-change", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("tool-changed", hal.HAL_BIT, hal.HAL_IN)
h.newpin("tool-number", hal.HAL_S32, hal.HAL_OUT)
h.newpin("tool-prep-number", hal.HAL_S32, hal.HAL_OUT)
h.newpin("tool-prep-pocket", hal.HAL_S32, hal.HAL_OUT)
h.newpin("tool-prepare", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("tool-prepared", hal.HAL_BIT, hal.HAL_IN)
if self.startchange_pins:
h.newpin("start-change", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("start-change-ack", hal.HAL_BIT, hal.HAL_IN)
if self.fault_pins:
h.newpin("emc-abort", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("emc-abort-ack", hal.HAL_BIT, hal.HAL_IN)
h.newpin("emc-reason", hal.HAL_S32, hal.HAL_OUT)
h.newpin("toolchanger-fault", hal.HAL_BIT, hal.HAL_IN)
h.newpin("toolchanger-fault-ack", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("toolchanger-reason", hal.HAL_S32, hal.HAL_IN)
h.newpin("toolchanger-faulted", hal.HAL_BIT, hal.HAL_OUT)
h.newpin("toolchanger-clear-fault", hal.HAL_BIT, hal.HAL_IN)
h.ready()
self.components = dict()
self.components["iocontrol.0"] = h
self.hal = h
self.hal_init_pins()
self.io = emctask.emcstat.io
self.io.aux.estop = 1
self._callback = None
self._check = None
tt = self.io.tool.toolTable
for p in range(0,len(tt)):
tt[p].zero()
UserFuncs.__init__(self)
self.enqueue = EnqueueCall(self)
except Exception,e:
print "__init__"
print_exc_plus()
self.io.status = emctask.RCS_STATUS.RCS_ERROR
else:
self.io.status = emctask.RCS_STATUS.RCS_DONE
def emcIoInit(self):
if debug(): print "py: emcIoInit tt=",self.tooltable_filename
try:
self.io.aux.estop = 1
self.io.tool.pocketPrepped = -1;
self.io.tool.toolInSpindle = 0;
self.io.coolant.mist = 0
self.io.coolant.flood = 0
self.io.lube.on = 0
self.io.lube.level = 1
self.hal_init_pins()
# on nonrandom machines, always start by assuming the spindle is empty
if not self.random_toolchanger:
self.io.tool.toolTable[0].zero()
if self.inifile.find("TOOL", "ODBC_CONNECT"):
import sqltoolaccess
self.tt = sqltoolaccess.SqlToolAccess(self.inifile, self.random_toolchanger)
else:
import tooltable
self.tt = tooltable.EmcToolTable(self.tooltable_filename, self.random_toolchanger)
self.comments = dict()
self.fms = dict()
self.tt.load_table(self.io.tool.toolTable,self.comments,self.fms)
self.tt.restore_state(emctask.emcstat)
# self.io.tool.toolInSpindle = 2 # works
self.reload_tool_number(self.io.tool.toolInSpindle)
except Exception,e:
print "emcIoInit",e
print_exc_plus()
self.io.status = emctask.RCS_STATUS.RCS_ERROR
else:
self.io.status = emctask.RCS_STATUS.RCS_DONE
finally:
return 0
def emcToolLoadToolTable(self,file):
# triggered by UI if tooltable was edited
if debug(): print "py: emcToolLoadToolTable file = '%s'" % (file)
self.comments = dict()
self.fms = dict()
try:
self.tt.load_table(self.io.tool.toolTable,self.comments,self.fms)
except Exception,e:
print_exc_plus()
self.io.status = emctask.RCS_STATUS.RCS_ERROR
else:
self.reload_tool_number(self.io.tool.toolInSpindle)
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def prepare_complete(self):
if debug(): print "prepare complete"
self.io.tool.pocketPrepped = self.hal["tool-prep-pocket"]
self.hal["tool-prepare"] = 0
def emcToolPrepare(self,p,tool):
if debug(): print "py: emcToolPrepare p =",p,"tool =",tool
if self.random_toolchanger and (p == 0):
if debug(): print "it doesn't make sense to prep the spindle pocket"
return 0
if self.tcpins:
if self.fault_pins and self.hal["toolchanger-faulted"]:
if debug(): print "prepare: toolchanger faulted (reason=%d), next M6 will %s" % (self.hal["toolchanger-reason"], "set fault code and reason" if self.hal["toolchanger-reason"] > 0 else "abort program")
self.hal["tool-prep-pocket"] = p
if not self.random_toolchanger and (p == 0):
self.hal["tool-prep-number"] = 0
else:
self.hal["tool-prep-number"] = self.io.tool.toolTable[p].toolno
self.hal["tool-prepare"] = 1
# and tell task to wait until status changes to RCS_DONE
self.io.status = self.wait_for_named_pin(1,"iocontrol.0.tool-prepared",self.prepare_complete)
else:
self.io.tool.pocketPrepped = p
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def reload_tool_number(self, toolno):
if self.random_toolchanger: return
t = self.io.tool.toolTable
for p in range(1,len(t)):
if toolno == t[p].toolno:
self.load_tool(p)
def load_tool(self,pocket):
if self.random_toolchanger:
self.io.tool.toolTable[0],self.io.tool.toolTable[pocket] = self.io.tool.toolTable[pocket],self.io.tool.toolTable[0]
self.comments[0],self.comments[pocket] = self.comments[pocket],self.comments[0]
self.tt.save_table(self.io.tool.toolTable,self.comments,self.fms)
else:
if pocket == 0:
self.io.tool.toolTable[0].zero()
else:
self.io.tool.toolTable[0] = self.io.tool.toolTable[pocket]
def change_complete(self):
if debug(): print "change complete"
if not self.random_toolchanger and (self.io.tool.pocketPrepped == 0):
self.io.tool.toolInSpindle = 0
else:
self.io.tool.toolInSpindle = self.io.tool.toolTable[self.io.tool.pocketPrepped].toolno
self.hal["tool-number"] = self.io.tool.toolInSpindle
self.load_tool(self.io.tool.pocketPrepped)
self.io.tool.pocketPrepped = -1
self.hal["tool-prep-number"] = 0
self.hal["tool-prep-pocket"] = 0
self.hal["tool-change"] = 0
def emcToolLoad(self):
if debug(): print "py: emcToolLoad"
if self.random_toolchanger and (self.io.tool.pocketPrepped == 0):
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
if not self.random_toolchanger and (self.io.tool.pocketPrepped > 0) and (
self.io.tool.toolInSpindle ==
self.io.tool.toolTable[self.io.tool.pocketPrepped].toolno):
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
if self.tcpins:
if self.fault_pins and self.hal["toolchanger-faulted"]:
self.io.status = emctask.RCS_STATUS.RCS_ERROR
return 0
if self.io.tool.pocketPrepped != -1:
self.hal["tool-change"] = 1
self.io.status = self.wait_for_named_pin(1,"iocontrol.0.tool-changed",self.change_complete)
return 0
else:
if not self.random_toolchanger and (self.io.tool.pocketPrepped == 0):
self.io.tool.toolInSpindle = 0
else:
self.io.tool.toolInSpindle = self.io.tool.toolTable[self.io.tool.pocketPrepped].toolno
self.load_tool(self.io.tool.pocketPrepped)
self.io.tool.pocketPrepped = -1
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcToolUnload(self):
if debug(): print "py: emcToolUnload"
self.io.tool.toolInSpindle = 0
# this isn't in ioControlv1, but I think it should be.
self.hal["tool-number"] = self.io.tool.toolInSpindle
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcToolSetNumber(self,number):
if debug(): print "py: emcToolSetNumber number =",number
self.io.tool.toolInSpindle = number
if self.tcpins:
self.hal["tool-number"] = self.io.tool.toolInSpindle
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcToolSetOffset(self,pocket,toolno,offset,diameter,frontangle,backangle,orientation):
if debug(): print "py: emcToolSetOffset", pocket,toolno,str(offset),diameter,frontangle,backangle,orientation
self.io.tool.toolTable[pocket].toolno = toolno
self.io.tool.toolTable[pocket].orientation = orientation
self.io.tool.toolTable[pocket].diameter = diameter
self.io.tool.toolTable[pocket].frontangle = frontangle
self.io.tool.toolTable[pocket].backangle = backangle
self.io.tool.toolTable[pocket].offset = offset
if debug(): print "new tool enttry: ",str(self.io.tool.toolTable[pocket])
if self.io.tool.toolInSpindle == toolno:
self.io.tool.toolTable[0] = self.io.tool.toolTable[pocket]
self.tt.save_table(self.io.tool.toolTable,self.comments,self.fms)
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcIoPluginCall(self, len, msg):
if debug(): print "py: emcIoPluginCall len=%d msg=%s" %(len,msg)
call = pickle.loads(msg)
func = getattr(self, call[0], None)
if func:
self.io.status = func(*call[1],**call[2])
else:
raise AttributeError, "no such method: " + call[0]
return 0
def emcIoHalt(self):
if debug(): print "py: emcIoHalt"
self.tt.save_state(emctask.emcstat)
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emc_abort_acked(self):
if debug(): print "emc_abort_acked"
self.hal["emc-abort"] = 0
def emcIoAbort(self,reason):
if debug(): print "py: emcIoAbort reason=",reason,"state=",emctask.emcstat.task.state
#if debug(): print "tc fault=",self.io.fault, "tc reason=",self.io.reason
self.io.coolant.mist = 0
self.io.coolant.flood = 0
if self.tcpins:
self.hal["coolant-mist"] = 0
self.hal["coolant-flood"] = 0
self.hal["tool-change"] = 0
self.hal["tool-prepare"] = 0
if self.startchange_pins:
self.hal["start-change"] = 0
if self.fault_pins:
self.hal["emc-reason"] = reason
self.hal["emc-abort"] = 1
self.io.status = self.wait_for_named_pin(1,"iocontrol.0.emc-abort-ack",self.emc_abort_acked)
return 0
if self._callback:
if debug(): print "emcIoAbort: cancelling callback to ",self._callback
self._callback = None
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def start_change_acked(self):
if debug(): print "start_change_acked"
self.hal["start-change"] = 0
def emcToolStartChange(self):
if debug(): print "py: emcToolStartChange", "wait for iocontrol.0.start-change-ack" if self.startchange_pins else "noop"
if self.startchange_pins:
self.hal["start-change"] = 1
self.io.status = self.wait_for_named_pin(1,"iocontrol.0.start-change-ack",self.start_change_acked)
return 0
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcAuxEstopOn(self):
if debug(): print "py: emcAuxEstopOn taskstate=",emctask.emcstat.task.state
self.hal["user-enable-out"] = 0
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcAuxEstopOff(self):
if debug(): print "py: emcAuxEstopOff"
self.hal["user-enable-out"] = 1
self.hal["user-request-enable"] = 1
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcCoolantMistOn(self):
if debug(): print "py: emcCoolantMistOn"
self.hal["coolant-mist"] = 1
self.io.coolant.mist = 1
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcCoolantMistOff(self):
if debug(): print "py: emcCoolantMistOff"
self.hal["coolant-mist"] = 0
self.io.coolant.mist = 0
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcCoolantFloodOn(self):
if debug(): print "py: emcCoolantFloodOn"
self.hal["coolant-flood"] = 1
self.io.coolant.flood = 1
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcCoolantFloodOff(self):
if debug(): print "py: emcCoolantFloodOff"
self.hal["coolant-flood"] = 0
self.io.coolant.flood = 0
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcLubeOn(self):
if debug(): print "py: emcLubeOn"
self.hal["lube"] = 1
self.io.lube.on = 1
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcLubeOff(self):
if debug(): print "py: emcLubeOff"
self.hal["lube"] = 0
self.io.lube.on = 0
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcIoSetDebug(self,debug):
if debug(): print "py: emcIoSetDebug debug =",debug
self.io.status = emctask.RCS_STATUS.RCS_DONE
return 0
def emcIoUpdate(self):
try:
#if debug(): print "py: emcIoUpdate"
self.hal["user-request-enable"] = 0
self.io.aux.estop = not self.hal["emc-enable-in"]
if self.fault_pins:
if self.hal["toolchanger-fault"]:
self.io.reason = self.hal["toolchanger-reason"]
self.hal["toolchanger-fault-ack"] = 1
self.hal["toolchanger-faulted"] = 1 # fault indicator latch
self.io.fault = 1
return 0
else:
self.hal["toolchanger-fault-ack"] = 0
if self.hal["toolchanger-clear-fault"]:
self.hal["toolchanger-faulted"] = 0 # reset fault indicator latch
self.io.reason = 0
if self._check:
self.io.status = self._check()
return 0
except KeyboardInterrupt: # shutting down
print "emcIoUpdate----KeyboardInterrupt:"
return -1
except Exception, e:
print "emcIoUpdate----:"
print_exc_plus()
return -1
else:
return 0
def wait_for_named_pin_callback(self):
if self._comp[self._pin] == self._value:
if debug(): print "pin %s now %d" % (self._pin, self._value)
if self._callback: self._callback()
self._check = None
self._callback = None
return emctask.RCS_STATUS.RCS_DONE
return emctask.RCS_STATUS.RCS_EXEC
def wait_for_named_pin(self,value,name,callback = None):
(component, pin) = name.rsplit('.',1)
comp = self.components[component]
if comp[pin] == value:
if debug(): print "pin: %s already at %d" % (name,value)
if callback: callback()
return emctask.RCS_STATUS.RCS_DONE
else:
if debug(): print "waiting for %s to become %d" % (name,value)
# else set up callback
self._comp = comp
self._pin = pin
self._value = value
self._check = self.wait_for_named_pin_callback
self._callback = callback
# and tell task to wait until status changes to RCS_DONE
return emctask.RCS_STATUS.RCS_EXEC
def hal_init_pins(self):
""" Sets HAL pins default values """
self.hal["user-enable-out"] = 0
self.hal["user-request-enable"] = 0
self.hal["coolant-mist"] = 0
self.hal["coolant-flood"] = 0
self.hal["lube"] = 0
if self.tcpins:
self.hal["tool-prepare"] = 0
self.hal["tool-prepared"] = 0
self.hal["tool-prep-number"] = 0
self.hal["tool-prep-pocket"] = 0
self.hal["tool-change"] = 0
self.hal["tool-number"] = 0
if self.startchange_pins:
self.hal["start-change"] = 0
if self.fault_pins:
self.hal["emc-abort"] = 0
self.hal["emc-reason"] = 0
self.hal["toolchanger-fault-ack"] = 0
self.hal["toolchanger-faulted"] = 0
# support queuing calls from Interp to Task Python methods:
# trap call, pickle a tuple of name and arguments and enqueue with canon IO_PLUGIN_CALL
class EnqueueCall(object):
def __init__(self,e):
if debug(): print "EnqueueCall.__init__()"
self._e = e
def _encode(self,*args,**kwargs):
if hasattr(self._e,self._name) and callable(getattr(self._e,self._name)):
p = pickle.dumps((self._name,args,kwargs),-1) # binary pickle
emccanon.IO_PLUGIN_CALL(int(len(p)),p)
else:
raise AttributeError,"no such Task method: " + self._name
def __getattr__(self, name):
self._name = name
return self._encode
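# Usage sketch (illustrative only, not part of the original file): remapped
# interpreter code holding a CustomTask instance could call, e.g.,
#
#     task.enqueue.emcToolSetNumber(3)
#
# EnqueueCall.__getattr__ records the method name, _encode() pickles the tuple
# ('emcToolSetNumber', (3,), {}) and hands it to emccanon.IO_PLUGIN_CALL();
# emcIoPluginCall() above unpickles the tuple and dispatches it to the
# matching Task method.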
## {{{ http://code.activestate.com/recipes/52215/ (r1)
def print_exc_plus():
"""
Print the usual traceback information, followed by a listing of all the
local variables in each frame.
"""
tb = sys.exc_info()[2]
while 1:
if not tb.tb_next:
break
tb = tb.tb_next
stack = []
f = tb.tb_frame
while f:
stack.append(f)
f = f.f_back
stack.reverse()
traceback.print_exc()
print "Locals by frame, innermost last"
for frame in stack:
print
print "Frame %s in %s at line %s" % (frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno)
for key, value in frame.f_locals.items():
print "\t%20s = " % key,
#We have to be careful not to cause a new error in our error
#printer! Calling str() on an unknown object could cause an
#error we don't want.
try:
print value
except:
print "<ERROR WHILE PRINTING VALUE>"
| gpl-3.0 |
gavoski/audacity | lib-src/lv2/lv2/plugins/eg04-sampler.lv2/waflib/Tools/suncc.py | 134 | 1378 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_scc(conf):
v=conf.env
cc=None
if v['CC']:cc=v['CC']
elif'CC'in conf.environ:cc=conf.environ['CC']
if not cc:cc=conf.find_program('cc',var='CC')
if not cc:conf.fatal('Could not find a Sun C compiler')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v['CC']=cc
v['CC_NAME']='sun'
@conf
def scc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=[]
v['CC_TGT_F']=['-c','-o']
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=''
v['CCLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cprogram_PATTERN']='%s'
v['CFLAGS_cshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cshlib']=['-G']
v['cshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cstlib']=['-Bstatic']
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| gpl-2.0 |
Gadal/sympy | sympy/liealgebras/root_system.py | 76 | 6884 |
# -*- coding: utf-8 -*-
from .cartan_type import CartanType
from sympy.core import Basic
from sympy.core.compatibility import range
class RootSystem(Basic):
"""Represent the root system of a simple Lie algebra
Every simple Lie algebra has a unique root system. To find the root
system, we first consider the Cartan subalgebra of g, which is the maximal
abelian subalgebra, and consider the adjoint action of g on this
subalgebra. There is a root system associated with this action. Now, a
root system over a vector space V is a finite set of vectors Φ (called
roots), which satisfy:
1. The roots span V
2. The only scalar multiples of x in Φ are x and -x
3. For every x in Φ, the set Φ is closed under reflection
through the hyperplane perpendicular to x.
4. If x and y are roots in Φ, then the projection of y onto
the line through x is a half-integral multiple of x.
Now, there is a subset of Φ, which we will call Δ, such that:
1. Δ is a basis of V
2. Each root x in Φ can be written x = Σ k_y y for y in Δ
The elements of Δ are called the simple roots.
Therefore, we see that the simple roots span the root space of a given
simple Lie algebra.
References: https://en.wikipedia.org/wiki/Root_system
Lie Algebras and Representation Theory - Humphreys
"""
def __new__(cls, cartantype):
"""Create a new RootSystem object
This method assigns an attribute called cartan_type to each instance of
a RootSystem object. When an instance of RootSystem is called, it
needs an argument, which should be an instance of a simple Lie algebra.
We then take the CartanType of this argument and set it as the
cartan_type attribute of the RootSystem instance.
"""
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def simple_roots(self):
"""Generate the simple roots of the Lie algebra
The rank of the Lie algebra determines the number of simple roots that
it has. This method obtains the rank of the Lie algebra, and then uses
the simple_root method from the Lie algebra classes to generate all the
simple roots.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> roots = c.simple_roots()
>>> roots
{1: [1, -1, 0, 0], 2: [0, 1, -1, 0], 3: [0, 0, 1, -1]}
"""
n = self.cartan_type.rank()
roots = {}
for i in range(1, n+1):
root = self.cartan_type.simple_root(i)
roots[i] = root
return roots
def all_roots(self):
"""Generate all the roots of a given root system
The result is a dictionary where the keys are integer numbers. It
generates the roots by getting the dictionary of all positive roots
from the base classes, and then taking each root, and multiplying it
by -1 and adding it to the dictionary. In this way all the negative
roots are generated.
"""
alpha = self.cartan_type.positive_roots()
keys = list(alpha.keys())
k = max(keys)
for val in keys:
k += 1
root = alpha[val]
newroot = [-x for x in root]
alpha[k] = newroot
return alpha
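# Illustrative note (not part of the original sympy source): the A3 system
# from the doctests above has six positive roots, so all_roots() returns a
# dictionary with twelve entries -- each positive root, e.g. [1, -1, 0, 0],
# paired with its negative [-1, 1, 0, 0] under a new key.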
def root_space(self):
"""Return the span of the simple roots
The root space is the vector space spanned by the simple roots, i.e. it
is a vector space with a distinguished basis, the simple roots. This
method returns a string that represents the root space as the span of
the simple roots, alpha[1],...., alpha[n].
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.root_space()
'alpha[1] + alpha[2] + alpha[3]'
"""
n = self.cartan_type.rank()
rs = " + ".join("alpha["+str(i) +"]" for i in range(1, n+1))
return rs
def add_simple_roots(self, root1, root2):
"""Add two simple roots together
The function takes as input two integers, root1 and root2. It then
uses these integers as keys in the dictionary of simple roots, and gets
the corresponding simple roots, and then adds them together.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> newroot = c.add_simple_roots(1, 2)
>>> newroot
[1, 0, -1, 0]
"""
alpha = self.simple_roots()
if root1 > len(alpha) or root2 > len(alpha):
raise ValueError("You've used a root that doesn't exist!")
a1 = alpha[root1]
a2 = alpha[root2]
newroot = []
length = len(a1)
for i in range(length):
newroot.append(a1[i] + a2[i])
return newroot
def add_as_roots(self, root1, root2):
"""Add two roots together if and only if their sum is also a root
It takes as input two vectors which should be roots. It then computes
their sum and checks if it is in the list of all possible roots. If it
is, it returns the sum. Otherwise it returns a string saying that the
sum is not a root.
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.add_as_roots([1, 0, -1, 0], [0, 0, 1, -1])
[1, 0, 0, -1]
>>> c.add_as_roots([1, -1, 0, 0], [0, 0, -1, 1])
'The sum of these two roots is not a root'
"""
alpha = self.all_roots()
newroot = []
for entry in range(len(root1)):
newroot.append(root1[entry] + root2[entry])
if newroot in alpha.values():
return newroot
else:
return "The sum of these two roots is not a root"
def cartan_matrix(self):
"""Cartan matrix of Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, -1, 2]])
"""
return self.cartan_type.cartan_matrix()
def dynkin_diagram(self):
"""Dynkin diagram of the Lie algebra associated with this root system
Examples
========
>>> from sympy.liealgebras.root_system import RootSystem
>>> c = RootSystem("A3")
>>> print(c.dynkin_diagram())
0---0---0
1 2 3
"""
return self.cartan_type.dynkin_diagram()
| bsd-3-clause |
gangadharkadam/letzbench | bench/app.py | 10 | 3179 |
import os
from .utils import exec_cmd, get_frappe, check_git_for_shallow_clone, get_config, build_assets, restart_supervisor_processes, get_cmd_output
import logging
import requests
import json
logger = logging.getLogger(__name__)
def get_apps(bench='.'):
try:
with open(os.path.join(bench, 'sites', 'apps.txt')) as f:
return f.read().strip().split('\n')
except IOError:
return []
def add_to_appstxt(app, bench='.'):
apps = get_apps(bench=bench)
if app not in apps:
apps.append(app)
return write_appstxt(apps, bench=bench)
def remove_from_appstxt(app, bench='.'):
apps = get_apps(bench=bench)
if app in apps:
apps.remove(app)
return write_appstxt(apps, bench=bench)
def write_appstxt(apps, bench='.'):
with open(os.path.join(bench, 'sites', 'apps.txt'), 'w') as f:
return f.write('\n'.join(apps))
def get_app(app, git_url, branch=None, bench='.', build_asset_files=True):
logger.info('getting app {}'.format(app))
shallow_clone = '--depth 1' if check_git_for_shallow_clone() and get_config().get('shallow_clone') else ''
branch = '--branch {branch}'.format(branch=branch) if branch else ''
exec_cmd("git clone {git_url} {branch} {shallow_clone} --origin upstream {app}".format(
git_url=git_url,
app=app,
shallow_clone=shallow_clone,
branch=branch),
cwd=os.path.join(bench, 'apps'))
print 'installing', app
install_app(app, bench=bench)
if build_asset_files:
build_assets(bench=bench)
conf = get_config()
if conf.get('restart_supervisor_on_update'):
restart_supervisor_processes(bench=bench)
def new_app(app, bench='.'):
logger.info('creating new app {}'.format(app))
exec_cmd("{frappe} --make_app {apps}".format(frappe=get_frappe(bench=bench), apps=os.path.join(bench, 'apps')))
install_app(app, bench=bench)
def install_app(app, bench='.'):
logger.info('installing {}'.format(app))
conf = get_config()
find_links = '--find-links={}'.format(conf.get('wheel_cache_dir')) if conf.get('wheel_cache_dir') else ''
exec_cmd("{pip} install -q {find_links} -e {app}".format(
pip=os.path.join(bench, 'env', 'bin', 'pip'),
app=os.path.join(bench, 'apps', app),
find_links=find_links))
add_to_appstxt(app, bench=bench)
def pull_all_apps(bench='.'):
apps_dir = os.path.join(bench, 'apps')
apps = [app for app in os.listdir(apps_dir) if os.path.isdir(os.path.join(apps_dir, app))]
rebase = '--rebase' if get_config().get('rebase_on_pull') else ''
for app in apps:
app_dir = os.path.join(apps_dir, app)
if os.path.exists(os.path.join(app_dir, '.git')):
logger.info('pulling {0}'.format(app))
exec_cmd("git pull {rebase} upstream {branch}".format(rebase=rebase, branch=get_current_branch(app_dir)), cwd=app_dir)
def get_current_branch(repo_dir):
return get_cmd_output("basename $(git symbolic-ref -q HEAD)", cwd=repo_dir)
def install_apps_from_path(path, bench='.'):
apps = get_apps_json(path)
for app in apps:
get_app(app['name'], app['url'], branch=app.get('branch'), bench=bench, build_asset_files=False)
build_assets(bench=bench)
def get_apps_json(path):
if path.startswith('http'):
r = requests.get(path)
return r.json()
else:
with open(path) as f:
return json.load(f)
| gpl-3.0 |
CIBC-Internal/itk | Modules/ThirdParty/pygccxml/src/pygccxml/declarations/container_traits.py | 2 | 23629 |
# Copyright 2014-2015 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
Define a few algorithms that deal with different properties of std containers.
"""
import string
from . import cpptypes
from . import templates
from . import type_traits
from . import namespace
from . import class_declaration
from .. import utils
std_namespaces = ('std', 'stdext', '__gnu_cxx')
class defaults_eraser(object):
def __init__(self, unordered_maps_and_sets):
self.unordered_maps_and_sets = unordered_maps_and_sets
def normalize(self, type_str):
return type_str.replace(' ', '')
def replace_basic_string(self, cls_name):
# Take the lists of all possible string variations
# and clean them up by replacing ::std by std.
str_eq = [
v.replace("::std", "std") for v in
type_traits.string_equivalences]
wstr_eq = [
v.replace("::std", "std") for v in
type_traits.wstring_equivalences]
# Replace all the variations of strings by the smallest one.
strings = {
"std::string": [v for v in str_eq if not v == "std::string"],
"std::wstring": [v for v in wstr_eq if not v == "std::wstring"]}
new_name = cls_name
for short_name, long_names in strings.items():
for lname in long_names:
new_name = new_name.replace(lname, short_name)
return new_name
def decorated_call_prefix(self, cls_name, text, doit):
has_text = cls_name.startswith(text)
if has_text:
cls_name = cls_name[len(text):]
answer = doit(cls_name)
if has_text:
answer = text + answer
return answer
def decorated_call_suffix(self, cls_name, text, doit):
has_text = cls_name.endswith(text)
if has_text:
cls_name = cls_name[:-len(text)]  # drop the trailing suffix
answer = doit(cls_name)
if has_text:
answer = answer + text
return answer
def erase_call(self, cls_name):
global find_container_traits
c_traits = find_container_traits(cls_name)
if not c_traits:
return cls_name
return c_traits.remove_defaults(cls_name)
def no_std(self, cls_name):
return self.decorated_call_prefix(
cls_name, 'std::' + utils.get_tr1(cls_name), self.erase_call)
def no_stdext(self, cls_name):
return self.decorated_call_prefix(
cls_name, 'stdext::', self.no_std)
def no_gnustd(self, cls_name):
return self.decorated_call_prefix(
cls_name, '__gnu_cxx::', self.no_stdext)
def no_const(self, cls_name):
return self.decorated_call_prefix(
cls_name, 'const ', self.no_gnustd)
def no_end_const(self, cls_name):
return self.decorated_call_suffix(
cls_name, ' const', self.no_const)
def erase_recursive(self, cls_name):
return self.no_end_const(cls_name)
def erase_allocator(self, cls_name, default_allocator='std::allocator'):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if 2 != len(c_args):
return
value_type = c_args[0]
tmpl = string.Template(
"$container< $value_type, $allocator<$value_type> >")
tmpl = tmpl.substitute(
container=c_name,
value_type=value_type,
allocator=default_allocator)
if self.normalize(cls_name) == \
self.normalize(tmpl):
return templates.join(
c_name, [self.erase_recursive(value_type)])
def erase_container(self, cls_name, default_container_name='std::deque'):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if 2 != len(c_args):
return
value_type = c_args[0]
dc_no_defaults = self.erase_recursive(c_args[1])
if self.normalize(dc_no_defaults) \
!= self.normalize(
templates.join(default_container_name, [value_type])):
return
return templates.join(
c_name, [self.erase_recursive(value_type)])
def erase_container_compare(
self,
cls_name,
default_container_name='std::vector',
default_compare='std::less'):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if 3 != len(c_args):
return
dc_no_defaults = self.erase_recursive(c_args[1])
if self.normalize(dc_no_defaults) \
!= self.normalize(
templates.join(default_container_name, [c_args[0]])):
return
dcomp_no_defaults = self.erase_recursive(c_args[2])
if self.normalize(dcomp_no_defaults) \
!= self.normalize(
templates.join(default_compare, [c_args[0]])):
return
value_type = self.erase_recursive(c_args[0])
return templates.join(c_name, [value_type])
def erase_compare_allocator(
self,
cls_name,
default_compare='std::less',
default_allocator='std::allocator'):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if 3 != len(c_args):
return
value_type = c_args[0]
tmpl = string.Template(
"$container< $value_type, $compare<$value_type>, " +
"$allocator<$value_type> >")
tmpl = tmpl.substitute(
container=c_name,
value_type=value_type,
compare=default_compare,
allocator=default_allocator)
if self.normalize(cls_name) == \
self.normalize(tmpl):
return templates.join(
c_name, [self.erase_recursive(value_type)])
def erase_map_compare_allocator(
self,
cls_name,
default_compare='std::less',
default_allocator='std::allocator'):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if 4 != len(c_args):
return
key_type = c_args[0]
mapped_type = c_args[1]
tmpls = [
string.Template(
"$container< $key_type, $mapped_type, $compare<$key_type>, " +
"$allocator< std::pair< const $key_type, $mapped_type> > >"),
string.Template(
"$container< $key_type, $mapped_type, $compare<$key_type>, " +
"$allocator< std::pair< $key_type const, $mapped_type> > >"),
string.Template(
"$container< $key_type, $mapped_type, $compare<$key_type>, " +
"$allocator< std::pair< $key_type, $mapped_type> > >")]
for tmpl in tmpls:
tmpl = tmpl.substitute(
container=c_name,
key_type=key_type,
mapped_type=mapped_type,
compare=default_compare,
allocator=default_allocator)
if self.normalize(cls_name) == \
self.normalize(tmpl):
return templates.join(
c_name,
[self.erase_recursive(key_type),
self.erase_recursive(mapped_type)])
def erase_hash_allocator(self, cls_name):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if len(c_args) < 3:
return
default_less = 'std::less'
default_equal_to = 'std::equal_to'
default_allocator = 'std::allocator'
if 3 == len(c_args):
default_hash = 'hash_compare'
tmpl = (
"$container< $value_type, $hash<$value_type, " +
"$less<$value_type> >, $allocator<$value_type> >")
elif 4 == len(c_args):
default_hash = 'hash'
tmpl = (
"$container< $value_type, $hash<$value_type >, " +
"$equal_to<$value_type >, $allocator<$value_type> >")
else:
return
value_type = c_args[0]
tmpl = string.Template(tmpl)
for ns in std_namespaces:
inst = tmpl.substitute(
container=c_name,
value_type=value_type,
hash=ns + '::' + utils.get_tr1(cls_name) + default_hash,
less=default_less,
equal_to=default_equal_to,
allocator=default_allocator)
if self.normalize(cls_name) == \
self.normalize(inst):
return templates.join(
c_name, [self.erase_recursive(value_type)])
def erase_hashmap_compare_allocator(self, cls_name):
cls_name = self.replace_basic_string(cls_name)
c_name, c_args = templates.split(cls_name)
if self.unordered_maps_and_sets:
default_less_or_hash = 'std::hash'
else:
default_less_or_hash = 'std::less'
default_allocator = 'std::allocator'
default_equal_to = 'std::equal_to'
if 2 < len(c_args):
key_type = c_args[0]
mapped_type = c_args[1]
else:
return
if 4 == len(c_args):
default_hash = 'hash_compare'
tmpl = string.Template(
"$container< $key_type, $mapped_type, " +
"$hash<$key_type, $less<$key_type> >, " +
"$allocator< std::pair< const $key_type, $mapped_type> > >")
if key_type.startswith('const ') or key_type.endswith(' const'):
tmpl = string.Template(
"$container< $key_type, $mapped_type, $hash<$key_type, " +
"$less<$key_type> >, $allocator< std::pair< $key_type, " +
"$mapped_type> > >")
elif 5 == len(c_args):
default_hash = 'hash'
if self.unordered_maps_and_sets:
tmpl = string.Template(
"$container<$key_type, $mapped_type, " +
"$hash<$key_type>, " +
"$equal_to<$key_type>, " +
"$allocator<std::pair<const$key_type, " +
"$mapped_type> > >")
if key_type.startswith('const ') or \
key_type.endswith(' const'):
tmpl = string.Template(
"$container<$key_type, $mapped_type, " +
"$hash<$key_type >, " +
"$equal_to<$key_type >, " +
"$allocator<std::pair<$key_type, " +
"$mapped_type> > >")
else:
tmpl = string.Template(
"$container< $key_type, $mapped_type, "
"$hash<$key_type >, " +
"$equal_to<$key_type>, "
"$allocator< $mapped_type> >")
if key_type.startswith('const ') or \
key_type.endswith(' const'):
# TODO: this template is the same as above.
# Figure out why this was needed and whether this is
# tested. There may be a const missing somewhere.
tmpl = string.Template(
"$container< $key_type, $mapped_type, " +
"$hash<$key_type >, " +
"$equal_to<$key_type>, " +
"$allocator< $mapped_type > >")
else:
return
for ns in std_namespaces:
inst = tmpl.substitute(
container=c_name,
key_type=key_type,
mapped_type=mapped_type,
hash=ns + '::' + utils.get_tr1(cls_name) + default_hash,
less=default_less_or_hash,
equal_to=default_equal_to,
allocator=default_allocator)
if self.normalize(cls_name) == self.normalize(inst):
return templates.join(
c_name,
[self.erase_recursive(key_type),
self.erase_recursive(mapped_type)])
class container_traits_impl_t(object):
"""
Implements the functionality needed for convenient work with STD container
classes
Implemented functionality:
* find out whether a declaration is STD container or not
* find out container value( mapped ) type
This class tries to be as useful as possible. For example, for class
declaration (and not definition) it parses the class name in order to
extract the information.
"""
def __init__(
self,
container_name,
element_type_index,
element_type_typedef,
eraser,
key_type_index=None,
key_type_typedef=None,
unordered_maps_and_sets=False):
"""
:param container_name: std container name
:param element_type_index: position of value\\mapped type within
template arguments list
:param element_type_typedef: class typedef to the value\\mapped type
:param key_type_index: position of key type within template arguments
list
:param key_type_typedef: class typedef to the key type
"""
self._name = container_name
self.element_type_index = element_type_index
self.element_type_typedef = element_type_typedef
self.key_type_index = key_type_index
self.key_type_typedef = key_type_typedef
self.unordered_maps_and_sets = unordered_maps_and_sets
# Get the method from defaults_eraser using its name
self.remove_defaults_impl = getattr(
defaults_eraser(unordered_maps_and_sets), eraser)
def name(self):
return self._name
def get_container_or_none(self, type_):
"""
Returns reference to the class declaration or None.
"""
type_ = type_traits.remove_alias(type_)
type_ = type_traits.remove_cv(type_)
utils.loggers.queries_engine.debug(
"Container traits: cleaned up search %s" % type_)
if isinstance(type_, cpptypes.declarated_t):
cls_declaration = type_traits.remove_alias(type_.declaration)
elif isinstance(type_, class_declaration.class_t):
cls_declaration = type_
elif isinstance(type_, class_declaration.class_declaration_t):
cls_declaration = type_
else:
utils.loggers.queries_engine.debug(
"Container traits: returning None, type not known\n")
return
if not cls_declaration.name.startswith(self.name() + '<'):
utils.loggers.queries_engine.debug(
"Container traits: returning None, " +
"declaration starts with " + self.name() + '<\n')
return
# When using libstdc++, some container traits are defined in
# std::tr1::. See remove_template_defaults_tester.py.
# In this case the is_defined_in_xxx test needs to be done
# on the parent
decl = cls_declaration
if isinstance(type_, class_declaration.class_declaration_t):
is_ns = isinstance(type_.parent, namespace.namespace_t)
if is_ns and type_.parent.name == "tr1":
decl = cls_declaration.parent
elif isinstance(type_, cpptypes.declarated_t):
is_ns = isinstance(type_.declaration.parent, namespace.namespace_t)
if is_ns and type_.declaration.parent.name == "tr1":
decl = cls_declaration.parent
for ns in std_namespaces:
if type_traits.impl_details.is_defined_in_xxx(ns, decl):
utils.loggers.queries_engine.debug(
"Container traits: get_container_or_none() will return " +
cls_declaration.name)
# The is_defined_in_xxx check is done on decl, but we return
# the original declaration so that the rest of the algorithm
# is able to work with it.
return cls_declaration
# This should not happen
utils.loggers.queries_engine.debug(
"Container traits: get_container_or_none() will return None\n")
def is_my_case(self, type_):
"""
Checks whether the type is an STD container or not.
"""
return bool(self.get_container_or_none(type_))
def class_declaration(self, type_):
"""
Returns reference to the class declaration.
"""
utils.loggers.queries_engine.debug(
"Container traits: searching class declaration for %s" % type_)
cls_declaration = self.get_container_or_none(type_)
if not cls_declaration:
raise TypeError(
'Type "%s" is not instantiation of std::%s' %
(type_.decl_string, self.name()))
return cls_declaration
def is_sequence(self, type_):
# raise exception if type is not container
self.class_declaration(type_)
return self.key_type_index is None
def is_mapping(self, type_):
return not self.is_sequence(type_)
def __find_xxx_type(
self,
type_,
xxx_index,
xxx_typedef,
cache_property_name):
cls_declaration = self.class_declaration(type_)
result = getattr(cls_declaration.cache, cache_property_name)
if not result:
if isinstance(cls_declaration, class_declaration.class_t):
xxx_type = cls_declaration.typedef(
xxx_typedef, recursive=False).type
result = type_traits.remove_declarated(xxx_type)
else:
xxx_type_str = templates.args(cls_declaration.name)[xxx_index]
result = type_traits.impl_details.find_value_type(
cls_declaration.top_parent, xxx_type_str)
if None is result:
raise RuntimeError(
"Unable to find out %s '%s' key\\value type." %
(self.name(), cls_declaration.decl_string))
setattr(cls_declaration.cache, cache_property_name, result)
return result
def element_type(self, type_):
"""returns reference to the class value\\mapped type declaration"""
return self.__find_xxx_type(
type_,
self.element_type_index,
self.element_type_typedef,
'container_element_type')
def key_type(self, type_):
"""returns reference to the class key type declaration"""
if not self.is_mapping(type_):
raise TypeError(
'Type "%s" is not "mapping" container' %
str(type_))
return self.__find_xxx_type(
type_,
self.key_type_index,
self.key_type_typedef,
'container_key_type')
def remove_defaults(self, type_or_string):
"""
Removes template defaults from a templated class instantiation.
For example:
.. code-block:: c++
std::vector< int, std::allocator< int > >
will become:
.. code-block:: c++
std::vector< int >
"""
name = type_or_string
if not utils.is_str(type_or_string):
name = self.class_declaration(type_or_string).name
if not self.remove_defaults_impl:
return name
no_defaults = self.remove_defaults_impl(name)
if not no_defaults:
return name
else:
return no_defaults
list_traits = container_traits_impl_t(
'list',
0,
'value_type',
'erase_allocator')
deque_traits = container_traits_impl_t(
'deque',
0,
'value_type',
'erase_allocator')
queue_traits = container_traits_impl_t(
'queue',
0,
'value_type',
'erase_container')
priority_queue_traits = container_traits_impl_t(
'priority_queue',
0,
'value_type',
'erase_container_compare')
vector_traits = container_traits_impl_t(
'vector',
0,
'value_type',
'erase_allocator')
stack_traits = container_traits_impl_t(
'stack',
0,
'value_type',
'erase_container')
map_traits = container_traits_impl_t(
'map',
1,
'mapped_type',
'erase_map_compare_allocator',
key_type_index=0,
key_type_typedef='key_type')
multimap_traits = container_traits_impl_t(
'multimap',
1,
'mapped_type',
'erase_map_compare_allocator',
key_type_index=0,
key_type_typedef='key_type')
hash_map_traits = container_traits_impl_t(
'hash_map',
1,
'mapped_type',
'erase_hashmap_compare_allocator',
key_type_index=0,
key_type_typedef='key_type')
hash_multimap_traits = container_traits_impl_t(
'hash_multimap',
1,
'mapped_type',
'erase_hashmap_compare_allocator',
key_type_index=0,
key_type_typedef='key_type')
set_traits = container_traits_impl_t(
'set',
0,
'value_type',
'erase_compare_allocator')
multiset_traits = container_traits_impl_t(
'multiset',
0,
'value_type',
'erase_compare_allocator')
hash_set_traits = container_traits_impl_t(
'hash_set',
0,
'value_type',
'erase_hash_allocator')
hash_multiset_traits = container_traits_impl_t(
'hash_multiset',
0,
'value_type',
'erase_hash_allocator')
# c++11 equivalents for clang
unordered_map_traits = container_traits_impl_t(
'unordered_map',
1,
'mapped_type',
'erase_hashmap_compare_allocator',
key_type_index=0,
key_type_typedef='key_type',
unordered_maps_and_sets=True)
unordered_multimap_traits = container_traits_impl_t(
'unordered_multimap',
1,
'mapped_type',
'erase_hashmap_compare_allocator',
key_type_index=0,
key_type_typedef='key_type',
unordered_maps_and_sets=True)
unordered_set_traits = container_traits_impl_t(
'unordered_set',
1,
'value_type',
'erase_hash_allocator',
unordered_maps_and_sets=True)
unordered_multiset_traits = container_traits_impl_t(
'unordered_multiset',
1,
'value_type',
'erase_hash_allocator',
unordered_maps_and_sets=True)
container_traits = (
list_traits,
deque_traits,
queue_traits,
priority_queue_traits,
vector_traits,
stack_traits,
map_traits,
multimap_traits,
hash_map_traits,
hash_multimap_traits,
set_traits,
hash_set_traits,
multiset_traits,
hash_multiset_traits,
unordered_map_traits,
unordered_multimap_traits,
unordered_set_traits,
unordered_multiset_traits)
"""tuple of all STD container traits classes"""
def find_container_traits(cls_or_string):
if utils.is_str(cls_or_string):
if not templates.is_instantiation(cls_or_string):
return None
name = templates.name(cls_or_string)
if name.startswith('std::tr1::'):
name = name[len('std::tr1::'):]
elif name.startswith('std::'):
name = name[len('std::'):]
for cls_traits in container_traits:
if cls_traits.name() == name:
return cls_traits
else:
for cls_traits in container_traits:
if cls_traits.is_my_case(cls_or_string):
return cls_traits
| apache-2.0 |
DavidAlphaFox/mysql-router | mysql_harness/ext/gtest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email [email protected] if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
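# Illustrative note (not part of the original script): the helper above drives the
# template expansion below, e.g. Iter(3, 'v%s', sep=', ') yields 'v1, v2, v3' and
# Iter(3, '#v%s', sep=', ') yields '#v1, #v2, #v3', which become the 'vs' and 'vts'
# entries of the DEFS maps used by ImplementationForArity and TestsForArity.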
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
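# Example invocation (taken from the module docstring above): regenerate the header
# and its unit test for predicate arities up to 5:
#
#   python gen_gtest_pred_impl.py 5
#
# which rewrites HEADER (../include/gtest/gtest_pred_impl.h) and UNIT_TEST
# (../test/gtest_pred_impl_unittest.cc) relative to this script's directory.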
|
gpl-2.0
|
pgonda/servo
|
tests/wpt/harness/wptrunner/browsers/base.py
|
195
|
4635
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import platform
import socket
from abc import ABCMeta, abstractmethod
from ..wptcommandline import require_arg
here = os.path.split(__file__)[0]
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def get_free_port(start_port, exclude=None):
"""Get the first port number after start_port (inclusive) that is
not currently bound.
:param start_port: Integer port number at which to start testing.
:param exclude: Set of port numbers to skip"""
port = start_port
while True:
if exclude and port in exclude:
port += 1
continue
s = socket.socket()
try:
s.bind(("127.0.0.1", port))
except socket.error:
port += 1
else:
return port
finally:
s.close()
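# Illustrative usage sketch (not part of the original module): pick an unused local
# port for a test server, skipping a port known to be busy. The port numbers are
# arbitrary example values.
def _example_pick_port():
    port = get_free_port(8000, exclude={8080})
    assert port >= 8000 and port != 8080
    return port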
def browser_command(binary, args, debug_info):
if debug_info:
if debug_info.requiresEscapedArgs:
args = [item.replace("&", "\\&") for item in args]
debug_args = [debug_info.path] + debug_info.args
else:
debug_args = []
command = [binary] + args
return debug_args, command
class BrowserError(Exception):
pass
class Browser(object):
__metaclass__ = ABCMeta
process_cls = None
init_timeout = 30
def __init__(self, logger):
"""Abstract class serving as the basis for Browser implementations.
The Browser is used in the TestRunnerManager to start and stop the browser
process, and to check the state of that process. This class also acts as a
context manager, enabling it to do browser-specific setup at the start of
the testrun and cleanup after the run is complete.
:param logger: Structured logger to use for output.
"""
self.logger = logger
def __enter__(self):
self.setup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def setup(self):
"""Used for browser-specific setup that happens at the start of a test run"""
pass
@abstractmethod
def start(self):
"""Launch the browser object and get it into a state where is is ready to run tests"""
pass
@abstractmethod
def stop(self):
"""Stop the running browser process."""
pass
@abstractmethod
def pid(self):
"""pid of the browser process or None if there is no pid"""
pass
@abstractmethod
def is_alive(self):
"""Boolean indicating whether the browser process is still running"""
pass
def setup_ssl(self, hosts):
"""Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
raise NotImplementedError("ssl testing not supported")
def cleanup(self):
"""Browser-specific cleanup that is run after the testrun is finished"""
pass
def executor_browser(self):
"""Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
with which it should be instantiated"""
return ExecutorBrowser, {}
def log_crash(self, process, test):
"""Return a list of dictionaries containing information about crashes that happend
in the browser, or an empty list if no crashes occurred"""
self.logger.crash(process, test)
class NullBrowser(Browser):
def start(self):
"""No-op browser to use in scenarios where the TestRunnerManager shouldn't
actually own the browser process (e.g. Servo where we start one browser
per test)"""
pass
def stop(self):
pass
def pid(self):
return None
def is_alive(self):
return True
def on_output(self, line):
raise NotImplementedError
class ExecutorBrowser(object):
def __init__(self, **kwargs):
"""View of the Browser used by the Executor object.
This is needed because the Executor runs in a child process and
we can't ship Browser instances between processes on Windows.
Typically this will have a few product-specific properties set,
but in some cases it may have more elaborate methods for setting
up the browser from the runner process.
"""
for k, v in kwargs.iteritems():
setattr(self, k, v)
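# Illustrative note (not part of wptrunner): the Browser docstring above points out
# that instances double as context managers, so a concrete implementation such as
# NullBrowser (given some structured logger object, hypothetical here) can be
# driven like this:
#
#   with NullBrowser(logger) as browser:
#       browser.start()
#       assert browser.is_alive()
#       browser.stop()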
|
mpl-2.0
|
hyOzd/cadquery
|
examples/FreeCAD/Ex007_Using_Point_Lists.py
|
5
|
1658
|
#File: Ex007_Using_Point_Lists.py
#To use this example file, you need to first follow the "Using CadQuery From Inside FreeCAD"
#instructions here: https://github.com/dcowden/cadquery#installing----using-cadquery-from-inside-freecad
#You run this example by typing the following in the FreeCAD python console, making sure to change
#the path to this example, and the name of the example appropriately.
#import sys
#sys.path.append('/home/user/Downloads/cadquery/examples/FreeCAD')
#import Ex007_Using_Point_Lists
#If you need to reload the part after making a change, you can use the following lines within the FreeCAD console.
#reload(Ex007_Using_Point_Lists)
#You'll need to delete the original shape that was created, and the new shape should be named sequentially
# (Shape001, etc).
#You can also tie these blocks of code to macros, buttons, and keybindings in FreeCAD for quicker access.
#You can get a more information on this example at
# http://parametricparts.com/docs/examples.html#an-extruded-prismatic-solid
import cadquery
import Part
#The dimensions of the model. These can be modified rather than changing the model's code directly.
plate_radius = 2.0
hole_pattern_radius = 0.25
thickness = 0.125
#Make the plate with 4 holes in it at various points
r = cadquery.Workplane("front").circle(plate_radius) # Make the base
r = r.pushPoints([(1.5, 0), (0, 1.5), (-1.5, 0), (0, -1.5)]) # Now four points are on the stack
r = r.circle(hole_pattern_radius) # Circle will operate on all four points
result = r.extrude(thickness)
#Boiler plate code to render our solid in FreeCAD's GUI
Part.show(result.toFreecad())
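#A possible variation (added for illustration, not part of the original example and
#untested here): the same point-list technique with a square hole pattern instead of
#the cross pattern above.
#r2 = cadquery.Workplane("front").circle(plate_radius)
#r2 = r2.pushPoints([(1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (1.0, -1.0)])
#result2 = r2.circle(hole_pattern_radius).extrude(thickness)
#Part.show(result2.toFreecad())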
|
lgpl-3.0
|
HybridF5/tempest
|
tempest/api/object_storage/test_object_formpost.py
|
21
|
4499
|
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import hmac
import time
from six.moves.urllib import parse as urlparse
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import test
class ObjectFormPostTest(base.BaseObjectTest):
metadata = {}
containers = []
@classmethod
def resource_setup(cls):
super(ObjectFormPostTest, cls).resource_setup()
cls.container_name = data_utils.rand_name(name='TestContainer')
cls.object_name = data_utils.rand_name(name='ObjectTemp')
cls.container_client.create_container(cls.container_name)
cls.containers = [cls.container_name]
cls.key = 'Meta'
cls.metadata = {'Temp-URL-Key': cls.key}
cls.account_client.create_account_metadata(metadata=cls.metadata)
def setUp(self):
super(ObjectFormPostTest, self).setUp()
# make sure the metadata has been set
account_client_metadata, _ = \
self.account_client.list_account_metadata()
self.assertIn('x-account-meta-temp-url-key',
account_client_metadata)
self.assertEqual(
account_client_metadata['x-account-meta-temp-url-key'],
self.key)
@classmethod
def resource_cleanup(cls):
cls.account_client.delete_account_metadata(metadata=cls.metadata)
cls.delete_containers(cls.containers)
super(ObjectFormPostTest, cls).resource_cleanup()
def get_multipart_form(self, expires=600):
path = "%s/%s/%s" % (
urlparse.urlparse(self.container_client.base_url).path,
self.container_name,
self.object_name)
redirect = ''
max_file_size = 104857600
max_file_count = 10
expires += int(time.time())
hmac_body = '%s\n%s\n%s\n%s\n%s' % (path,
redirect,
max_file_size,
max_file_count,
expires)
signature = hmac.new(self.key, hmac_body, hashlib.sha1).hexdigest()
fields = {'redirect': redirect,
'max_file_size': str(max_file_size),
'max_file_count': str(max_file_count),
'expires': str(expires),
'signature': signature}
boundary = '--boundary--'
data = []
for (key, value) in fields.items():
data.append('--' + boundary)
data.append('Content-Disposition: form-data; name="%s"' % key)
data.append('')
data.append(value)
data.append('--' + boundary)
data.append('Content-Disposition: form-data; '
'name="file1"; filename="testfile"')
data.append('Content-Type: application/octet-stream')
data.append('')
data.append('hello world')
data.append('--' + boundary + '--')
data.append('')
body = '\r\n'.join(data)
content_type = 'multipart/form-data; boundary=%s' % boundary
return body, content_type
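    # Illustrative note (not part of the original test): the signature above is an
    # HMAC-SHA1 of the newline-joined path, redirect, max_file_size, max_file_count
    # and expiry, keyed with the account's Temp-URL-Key ('Meta' in this class). A
    # standalone sketch with hypothetical values:
    #
    #   hmac_body = '/v1/AUTH_test/cont/obj\n\n104857600\n10\n1700000000'
    #   signature = hmac.new('Meta', hmac_body, hashlib.sha1).hexdigest()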
@test.idempotent_id('80fac02b-6e54-4f7b-be0d-a965b5cbef76')
@test.requires_ext(extension='formpost', service='object')
def test_post_object_using_form(self):
body, content_type = self.get_multipart_form()
headers = {'Content-Type': content_type,
'Content-Length': str(len(body))}
url = "%s/%s" % (self.container_name, self.object_name)
resp, body = self.object_client.post(url, body, headers=headers)
self.assertHeaders(resp, "Object", "POST")
# Ensure object is available
resp, body = self.object_client.get("%s/%s%s" % (
self.container_name, self.object_name, "testfile"))
self.assertHeaders(resp, "Object", "GET")
self.assertEqual(body, "hello world")
|
apache-2.0
|
ibinti/intellij-community
|
python/lib/Lib/site-packages/django/contrib/formtools/tests/__init__.py
|
71
|
13996
|
import os
from django import forms, http
from django.conf import settings
from django.contrib.formtools import preview, wizard, utils
from django.test import TestCase
from django.utils import unittest
success_string = "Done was called!"
class TestFormPreview(preview.FormPreview):
def get_context(self, request, form):
context = super(TestFormPreview, self).get_context(request, form)
context.update({'custom_context': True})
return context
def get_initial(self, request):
return {'field1': 'Works!'}
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class TestForm(forms.Form):
field1 = forms.CharField()
field1_ = forms.CharField()
bool1 = forms.BooleanField(required=False)
class UserSecuredFormPreview(TestFormPreview):
"""
    FormPreview with a custom security_hash method
"""
def security_hash(self, request, form):
return "123"
class PreviewTests(TestCase):
urls = 'django.contrib.formtools.tests.urls'
def setUp(self):
# Create a FormPreview instance to share between tests
self.preview = preview.FormPreview(TestForm)
input_template = '<input type="hidden" name="%s" value="%s" />'
self.input = input_template % (self.preview.unused_name('stage'), "%d")
self.test_data = {'field1':u'foo', 'field1_':u'asdf'}
def test_unused_name(self):
"""
        Verifies name mangling to get unique field name.
"""
self.assertEqual(self.preview.unused_name('field1'), 'field1__')
def test_form_get(self):
"""
Test contrib.formtools.preview form retrieval.
        Use the client library to see if we can successfully retrieve
the form (mostly testing the setup ROOT_URLCONF
process). Verify that an additional hidden input field
is created to manage the stage.
"""
response = self.client.get('/test1/')
stage = self.input % 1
self.assertContains(response, stage, 1)
self.assertEquals(response.context['custom_context'], True)
self.assertEquals(response.context['form'].initial, {'field1': 'Works!'})
def test_form_preview(self):
"""
Test contrib.formtools.preview form preview rendering.
Use the client library to POST to the form to see if a preview
is returned. If we do get a form back check that the hidden
value is correctly managing the state of the form.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage': 1})
response = self.client.post('/test1/', self.test_data)
# Check to confirm stage is set to 2 in output form.
stage = self.input % 2
self.assertContains(response, stage, 1)
def test_form_submit(self):
"""
Test contrib.formtools.preview form submittal.
Use the client library to POST to the form with stage set to 3
to see if our forms done() method is called. Check first
without the security hash, verify failure, retry with security
        hash and verify success.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/test1/', self.test_data)
self.assertNotEqual(response.content, success_string)
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/test1/', self.test_data)
self.assertEqual(response.content, success_string)
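    # Illustrative note (not part of the original tests): the second POST above
    # carries form data shaped roughly like (values hypothetical):
    #
    #   {'field1': u'foo', 'field1_': u'asdf', 'stage': 2, 'hash': '<security hash>'}
    #
    # i.e. the original field values plus the stage marker and the security hash
    # computed over the bound form.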
def test_bool_submit(self):
"""
Test contrib.formtools.preview form submittal when form contains:
BooleanField(required=False)
Ticket: #6209 - When an unchecked BooleanField is previewed, the preview
form's hash would be computed with no value for ``bool1``. However, when
the preview form is rendered, the unchecked hidden BooleanField would be
rendered with the string value 'False'. So when the preview form is
resubmitted, the hash would be computed with the value 'False' for
``bool1``. We need to make sure the hashes are the same in both cases.
"""
self.test_data.update({'stage':2})
hash = self.preview.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash':hash, 'bool1':u'False'})
response = self.client.post('/test1/', self.test_data)
self.assertEqual(response.content, success_string)
def test_form_submit_django12_hash(self):
"""
Test contrib.formtools.preview form submittal, using the hash function
used in Django 1.2
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/test1/', self.test_data)
self.assertNotEqual(response.content, success_string)
hash = utils.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/test1/', self.test_data)
self.assertEqual(response.content, success_string)
def test_form_submit_django12_hash_custom_hash(self):
"""
Test contrib.formtools.preview form submittal, using the hash function
used in Django 1.2 and a custom security_hash method.
"""
# Pass strings for form submittal and add stage variable to
# show we previously saw first stage of the form.
self.test_data.update({'stage':2})
response = self.client.post('/test2/', self.test_data)
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.content, success_string)
hash = utils.security_hash(None, TestForm(self.test_data))
self.test_data.update({'hash': hash})
response = self.client.post('/test2/', self.test_data)
self.assertNotEqual(response.content, success_string)
class SecurityHashTests(unittest.TestCase):
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.security_hash(None, f1)
hash2 = utils.security_hash(None, f2)
self.assertEqual(hash1, hash2)
class FormHmacTests(unittest.TestCase):
"""
Same as SecurityHashTests, but with form_hmac
"""
def test_textfield_hash(self):
"""
Regression test for #10034: the hash generation function should ignore
leading/trailing whitespace so as to be friendly to broken browsers that
submit it (usually in textareas).
"""
f1 = HashTestForm({'name': 'joe', 'bio': 'Nothing notable.'})
f2 = HashTestForm({'name': ' joe', 'bio': 'Nothing notable. '})
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
def test_empty_permitted(self):
"""
Regression test for #10643: the security hash should allow forms with
empty_permitted = True, or forms where data has not changed.
"""
f1 = HashTestBlankForm({})
f2 = HashTestForm({}, empty_permitted=True)
hash1 = utils.form_hmac(f1)
hash2 = utils.form_hmac(f2)
self.assertEqual(hash1, hash2)
class HashTestForm(forms.Form):
name = forms.CharField()
bio = forms.CharField()
class HashTestBlankForm(forms.Form):
name = forms.CharField(required=False)
bio = forms.CharField(required=False)
#
# FormWizard tests
#
class WizardPageOneForm(forms.Form):
field = forms.CharField()
class WizardPageTwoForm(forms.Form):
field = forms.CharField()
class WizardPageThreeForm(forms.Form):
field = forms.CharField()
class WizardClass(wizard.FormWizard):
def get_template(self, step):
return 'formwizard/wizard.html'
def done(self, request, cleaned_data):
return http.HttpResponse(success_string)
class UserSecuredWizardClass(WizardClass):
"""
    Wizard with a custom security_hash method
"""
def security_hash(self, request, form):
return "123"
class DummyRequest(http.HttpRequest):
def __init__(self, POST=None):
super(DummyRequest, self).__init__()
self.method = POST and "POST" or "GET"
if POST is not None:
self.POST.update(POST)
self._dont_enforce_csrf_checks = True
class WizardTests(TestCase):
urls = 'django.contrib.formtools.tests.urls'
def setUp(self):
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(
os.path.dirname(__file__),
'templates'
),
)
# Use a known SECRET_KEY to make security_hash tests deterministic
self.old_SECRET_KEY = settings.SECRET_KEY
settings.SECRET_KEY = "123"
def tearDown(self):
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
settings.SECRET_KEY = self.old_SECRET_KEY
def test_step_starts_at_zero(self):
"""
step should be zero for the first form
"""
response = self.client.get('/wizard/')
self.assertEquals(0, response.context['step0'])
def test_step_increments(self):
"""
step should be incremented when we go to the next page
"""
response = self.client.post('/wizard/', {"0-field":"test", "wizard_step":"0"})
self.assertEquals(1, response.context['step0'])
def test_bad_hash(self):
"""
Form should not advance if the hash is missing or bad
"""
response = self.client.post('/wizard/',
{"0-field":"test",
"1-field":"test2",
"wizard_step": "1"})
self.assertEquals(0, response.context['step0'])
def test_good_hash_django12(self):
"""
Form should advance if the hash is present and good, as calculated using
django 1.2 method.
"""
# We are hard-coding a hash value here, but that is OK, since we want to
# ensure that we don't accidentally change the algorithm.
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
response = self.client.post('/wizard/', data)
self.assertEquals(2, response.context['step0'])
def test_good_hash_django12_subclass(self):
"""
        The Django 1.2 method of calculating hashes should *not* be used as a
fallback if the FormWizard subclass has provided their own method
of calculating a hash.
"""
# We are hard-coding a hash value here, but that is OK, since we want to
# ensure that we don't accidentally change the algorithm.
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
response = self.client.post('/wizard2/', data)
self.assertEquals(0, response.context['step0'])
def test_good_hash_current(self):
"""
Form should advance if the hash is present and good, as calculated using
current method.
"""
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
response = self.client.post('/wizard/', data)
self.assertEquals(2, response.context['step0'])
def test_14498(self):
"""
Regression test for ticket #14498.
"""
that = self
class WizardWithProcessStep(WizardClass):
def process_step(self, request, form, step):
that.assertTrue(hasattr(form, 'cleaned_data'))
wizard = WizardWithProcessStep([WizardPageOneForm,
WizardPageTwoForm,
WizardPageThreeForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "7e9cea465f6a10a6fb47fcea65cb9a76350c9a5c",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
def test_14576(self):
"""
Regression test for ticket #14576.
The form of the last step is not passed to the done method.
"""
reached = [False]
that = self
class Wizard(WizardClass):
def done(self, request, form_list):
reached[0] = True
that.assertTrue(len(form_list) == 2)
wizard = Wizard([WizardPageOneForm,
WizardPageTwoForm])
data = {"0-field": "test",
"1-field": "test2",
"hash_0": "2fdbefd4c0cad51509478fbacddf8b13",
"wizard_step": "1"}
wizard(DummyRequest(POST=data))
self.assertTrue(reached[0])
|
apache-2.0
|
arleincho/python-oauth2
|
example/server.py
|
375
|
7669
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import urllib
import oauth.oauth as oauth
# fake urls for the test server
REQUEST_TOKEN_URL = 'https://photos.example.net/request_token'
ACCESS_TOKEN_URL = 'https://photos.example.net/access_token'
AUTHORIZATION_URL = 'https://photos.example.net/authorize'
CALLBACK_URL = 'http://printer.example.com/request_token_ready'
RESOURCE_URL = 'http://photos.example.net/photos'
REALM = 'http://photos.example.net/'
VERIFIER = 'verifier'
# example store for one of each thing
class MockOAuthDataStore(oauth.OAuthDataStore):
def __init__(self):
self.consumer = oauth.OAuthConsumer('key', 'secret')
self.request_token = oauth.OAuthToken('requestkey', 'requestsecret')
self.access_token = oauth.OAuthToken('accesskey', 'accesssecret')
self.nonce = 'nonce'
self.verifier = VERIFIER
def lookup_consumer(self, key):
if key == self.consumer.key:
return self.consumer
return None
def lookup_token(self, token_type, token):
token_attrib = getattr(self, '%s_token' % token_type)
if token == token_attrib.key:
## HACK
token_attrib.set_callback(CALLBACK_URL)
return token_attrib
return None
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
if oauth_token and oauth_consumer.key == self.consumer.key and (oauth_token.key == self.request_token.key or oauth_token.key == self.access_token.key) and nonce == self.nonce:
return self.nonce
return None
def fetch_request_token(self, oauth_consumer, oauth_callback):
if oauth_consumer.key == self.consumer.key:
if oauth_callback:
# want to check here if callback is sensible
# for mock store, we assume it is
self.request_token.set_callback(oauth_callback)
return self.request_token
return None
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
if oauth_consumer.key == self.consumer.key and oauth_token.key == self.request_token.key and oauth_verifier == self.verifier:
# want to check here if token is authorized
# for mock store, we assume it is
return self.access_token
return None
def authorize_request_token(self, oauth_token, user):
if oauth_token.key == self.request_token.key:
# authorize the request token in the store
# for mock store, do nothing
return self.request_token
return None
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.oauth_server = oauth.OAuthServer(MockOAuthDataStore())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
self.oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
# example way to send an oauth error
def send_oauth_error(self, err=None):
# send a 401 error
self.send_error(401, str(err.message))
# return the authenticate header
header = oauth.build_authenticate_header(realm=REALM)
for k, v in header.iteritems():
self.send_header(k, v)
def do_GET(self):
# debug info
#print self.command, self.path, self.headers
# get the post data (if any)
postdata = None
if self.command == 'POST':
try:
length = int(self.headers.getheader('content-length'))
postdata = self.rfile.read(length)
except:
pass
# construct the oauth request from the request parameters
oauth_request = oauth.OAuthRequest.from_request(self.command, self.path, headers=self.headers, query_string=postdata)
# request token
if self.path.startswith(REQUEST_TOKEN_URL):
try:
# create a request token
token = self.oauth_server.fetch_request_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# user authorization
if self.path.startswith(AUTHORIZATION_URL):
try:
# get the request token
token = self.oauth_server.fetch_request_token(oauth_request)
# authorize the token (kind of does nothing for now)
token = self.oauth_server.authorize_token(token, None)
token.set_verifier(VERIFIER)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the callback url (to show server has it)
self.wfile.write(token.get_callback_url())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# access token
if self.path.startswith(ACCESS_TOKEN_URL):
try:
# create an access token
token = self.oauth_server.fetch_access_token(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the token
self.wfile.write(token.to_string())
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
# protected resources
if self.path.startswith(RESOURCE_URL):
try:
# verify the request has been oauth authorized
consumer, token, params = self.oauth_server.verify_request(oauth_request)
# send okay response
self.send_response(200, 'OK')
self.end_headers()
# return the extra parameters - just for something to return
self.wfile.write(str(params))
except oauth.OAuthError, err:
self.send_oauth_error(err)
return
def do_POST(self):
return self.do_GET()
def main():
try:
server = HTTPServer(('', 8080), RequestHandler)
print 'Test server running...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
if __name__ == '__main__':
main()
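# Illustrative sketch (not part of the original example; untested): a matching
# client would sign requests against the same fake URLs with the same oauth.oauth
# module, roughly:
#
#   consumer = oauth.OAuthConsumer('key', 'secret')
#   request = oauth.OAuthRequest.from_consumer_and_token(
#       consumer, http_url=REQUEST_TOKEN_URL, callback=CALLBACK_URL)
#   request.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(), consumer, None)
#
# The consumer key/secret must match the values hard-coded in MockOAuthDataStore.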
|
mit
|
RedbackThomson/LoLShadow
|
sleekxmpp/plugins/google/nosave/stanza.py
|
10
|
1467
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.jid import JID
from sleekxmpp.xmlstream import ElementBase, register_stanza_plugin
class NoSave(ElementBase):
name = 'x'
namespace = 'google:nosave'
plugin_attrib = 'google_nosave'
interfaces = set(['value'])
def get_value(self):
return self._get_attr('value', '') == 'enabled'
def set_value(self, value):
self._set_attr('value', 'enabled' if value else 'disabled')
class NoSaveQuery(ElementBase):
name = 'query'
namespace = 'google:nosave'
plugin_attrib = 'google_nosave'
interfaces = set()
class Item(ElementBase):
name = 'item'
namespace = 'google:nosave'
plugin_attrib = 'item'
plugin_multi_attrib = 'items'
interfaces = set(['jid', 'source', 'value'])
def get_value(self):
return self._get_attr('value', '') == 'enabled'
def set_value(self, value):
self._set_attr('value', 'enabled' if value else 'disabled')
def get_jid(self):
return JID(self._get_attr('jid', ''))
def set_jid(self, value):
self._set_attr('jid', str(value))
def get_source(self):
return JID(self._get_attr('source', ''))
    def set_source(self, value):
self._set_attr('source', str(value))
register_stanza_plugin(NoSaveQuery, Item)
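# Illustrative usage sketch (not part of the original plugin module): building an
# Item by hand to show the enabled/disabled mapping defined above. The JID is a
# made-up example value.
def _example_nosave_item():
    item = Item()
    item['jid'] = JID('user@example.com')
    item['value'] = True   # stored on the wire as value='enabled'
    return item['value']   # get_value maps 'enabled' back to True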
|
mit
|
TeutoNet-Netzdienste/ansible
|
lib/ansible/runner/poller.py
|
132
|
4480
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
from ansible import errors
class AsyncPoller(object):
""" Manage asynchronous jobs. """
def __init__(self, results, runner):
self.runner = runner
self.results = { 'contacted': {}, 'dark': {}}
self.hosts_to_poll = []
self.completed = False
# flag to determine if at least one host was contacted
self.active = False
# True to work with the `and` below
skipped = True
jid = None
for (host, res) in results['contacted'].iteritems():
if res.get('started', False):
self.hosts_to_poll.append(host)
jid = res.get('ansible_job_id', None)
self.runner.vars_cache[host]['ansible_job_id'] = jid
self.active = True
else:
skipped = skipped and res.get('skipped', False)
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['contacted'][host] = res
for (host, res) in results['dark'].iteritems():
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['dark'][host] = res
if not skipped:
if jid is None:
raise errors.AnsibleError("unexpected error: unable to determine jid")
if len(self.hosts_to_poll)==0:
raise errors.AnsibleError("unexpected error: no hosts to poll")
def poll(self):
""" Poll the job status.
Returns the changes in this iteration."""
self.runner.module_name = 'async_status'
self.runner.module_args = "jid={{ansible_job_id}}"
self.runner.pattern = "*"
self.runner.background = 0
self.runner.complex_args = None
self.runner.inventory.restrict_to(self.hosts_to_poll)
results = self.runner.run()
self.runner.inventory.lift_restriction()
hosts = []
poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
for (host, res) in results['contacted'].iteritems():
if res.get('started',False):
hosts.append(host)
poll_results['polled'][host] = res
else:
self.results['contacted'][host] = res
poll_results['contacted'][host] = res
if res.get('failed', False) or res.get('rc', 0) != 0:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
else:
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
for (host, res) in results['dark'].iteritems():
self.results['dark'][host] = res
poll_results['dark'][host] = res
if host in self.hosts_to_poll:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
self.hosts_to_poll = hosts
if len(hosts)==0:
self.completed = True
return poll_results
def wait(self, seconds, poll_interval):
""" Wait a certain time for job completion, check status every poll_interval. """
# jid is None when all hosts were skipped
if not self.active:
return self.results
clock = seconds - poll_interval
while (clock >= 0 and not self.completed):
time.sleep(poll_interval)
poll_results = self.poll()
for (host, res) in poll_results['polled'].iteritems():
if res.get('started'):
self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
clock = clock - poll_interval
return self.results
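# Illustrative usage sketch (not part of the original module): how a Runner-style
# caller would typically drive this poller for a background task. The runner object
# and the timing values are hypothetical placeholders.
#
#   results = runner.run()                  # kicks off the async task
#   poller = AsyncPoller(results, runner)
#   final = poller.wait(seconds=300, poll_interval=10)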
|
gpl-3.0
|
malayaleecoder/servo
|
tests/wpt/web-platform-tests/tools/pytest/testing/test_assertion.py
|
170
|
19078
|
# -*- coding: utf-8 -*-
import sys
import textwrap
import _pytest.assertion as plugin
import _pytest._code
import py
import pytest
from _pytest.assertion import reinterpret
from _pytest.assertion import util
PY3 = sys.version_info >= (3, 0)
@pytest.fixture
def mock_config():
class Config(object):
verbose = False
def getoption(self, name):
if name == 'verbose':
return self.verbose
raise KeyError('Not mocked out: %s' % name)
return Config()
def interpret(expr):
return reinterpret.reinterpret(expr, _pytest._code.Frame(sys._getframe(1)))
class TestBinReprIntegration:
def test_pytest_assertrepr_compare_called(self, testdir):
testdir.makeconftest("""
l = []
def pytest_assertrepr_compare(op, left, right):
l.append((op, left, right))
def pytest_funcarg__l(request):
return l
""")
testdir.makepyfile("""
def test_hello():
assert 0 == 1
def test_check(l):
assert l == [("==", 0, 1)]
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"*test_hello*FAIL*",
"*test_check*PASS*",
])
def callequal(left, right, verbose=False):
config = mock_config()
config.verbose = verbose
return plugin.pytest_assertrepr_compare(config, '==', left, right)
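# Illustrative note (not part of the original tests): callequal wraps the
# pytest_assertrepr_compare hook for the '==' operator, so e.g.
# callequal([0, 1], [0, 2]) returns a list of explanation lines whose first entry
# summarises the comparison, which is what the tests below rely on.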
class TestAssert_reprcompare:
def test_different_types(self):
assert callequal([0, 1], 'foo') is None
def test_summary(self):
summary = callequal([0, 1], [0, 2])[0]
assert len(summary) < 65
def test_text_diff(self):
diff = callequal('spam', 'eggs')[1:]
assert '- spam' in diff
assert '+ eggs' in diff
def test_text_skipping(self):
lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs')
assert 'Skipping' in lines[1]
for line in lines:
assert 'a'*50 not in line
def test_text_skipping_verbose(self):
lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True)
assert '- ' + 'a'*50 + 'spam' in lines
assert '+ ' + 'a'*50 + 'eggs' in lines
def test_multiline_text_diff(self):
left = 'foo\nspam\nbar'
right = 'foo\neggs\nbar'
diff = callequal(left, right)
assert '- spam' in diff
assert '+ eggs' in diff
def test_list(self):
expl = callequal([0, 1], [0, 2])
assert len(expl) > 1
@pytest.mark.parametrize(
['left', 'right', 'expected'], [
([0, 1], [0, 2], """
Full diff:
- [0, 1]
? ^
+ [0, 2]
? ^
"""),
({0: 1}, {0: 2}, """
Full diff:
- {0: 1}
? ^
+ {0: 2}
? ^
"""),
(set([0, 1]), set([0, 2]), """
Full diff:
- set([0, 1])
? ^
+ set([0, 2])
? ^
""" if not PY3 else """
Full diff:
- {0, 1}
? ^
+ {0, 2}
? ^
""")
]
)
def test_iterable_full_diff(self, left, right, expected):
"""Test the full diff assertion failure explanation.
        When verbose is False, just a -v notice to get the full diff is rendered;
        when verbose is True, the ndiff of the pprint output is returned.
"""
expl = callequal(left, right, verbose=False)
assert expl[-1] == 'Use -v to get the full diff'
expl = '\n'.join(callequal(left, right, verbose=True))
assert expl.endswith(textwrap.dedent(expected).strip())
def test_list_different_lenghts(self):
expl = callequal([0, 1], [0, 1, 2])
assert len(expl) > 1
expl = callequal([0, 1, 2], [0, 1])
assert len(expl) > 1
def test_dict(self):
expl = callequal({'a': 0}, {'a': 1})
assert len(expl) > 1
def test_dict_omitting(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1})
assert lines[1].startswith('Omitting 1 identical item')
assert 'Common items' not in lines
for line in lines[1:]:
assert 'b' not in line
def test_dict_omitting_verbose(self):
lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True)
assert lines[1].startswith('Common items:')
assert 'Omitting' not in lines[1]
assert lines[2] == "{'b': 1}"
def test_set(self):
expl = callequal(set([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_frozenzet(self):
expl = callequal(frozenset([0, 1]), set([0, 2]))
assert len(expl) > 1
def test_Sequence(self):
col = py.builtin._tryimport(
"collections.abc",
"collections",
"sys")
if not hasattr(col, "MutableSequence"):
pytest.skip("cannot import MutableSequence")
MutableSequence = col.MutableSequence
class TestSequence(MutableSequence): # works with a Sequence subclass
def __init__(self, iterable):
self.elements = list(iterable)
def __getitem__(self, item):
return self.elements[item]
def __len__(self):
return len(self.elements)
def __setitem__(self, item, value):
pass
def __delitem__(self, item):
pass
def insert(self, item, index):
pass
expl = callequal(TestSequence([0, 1]), list([0, 2]))
assert len(expl) > 1
def test_list_tuples(self):
expl = callequal([], [(1,2)])
assert len(expl) > 1
expl = callequal([(1,2)], [])
assert len(expl) > 1
def test_list_bad_repr(self):
class A:
def __repr__(self):
raise ValueError(42)
expl = callequal([], [A()])
assert 'ValueError' in "".join(expl)
expl = callequal({}, {'1': A()})
assert 'faulty' in "".join(expl)
def test_one_repr_empty(self):
"""
the faulty empty string repr did trigger
        an unbound local error in _diff_text
"""
class A(str):
def __repr__(self):
return ''
expl = callequal(A(), '')
assert not expl
def test_repr_no_exc(self):
expl = ' '.join(callequal('foo', 'bar'))
assert 'raised in repr()' not in expl
def test_unicode(self):
left = py.builtin._totext('£€', 'utf-8')
right = py.builtin._totext('£', 'utf-8')
expl = callequal(left, right)
assert expl[0] == py.builtin._totext("'£€' == '£'", 'utf-8')
assert expl[1] == py.builtin._totext('- £€', 'utf-8')
assert expl[2] == py.builtin._totext('+ £', 'utf-8')
def test_nonascii_text(self):
"""
:issue: 877
non ascii python2 str caused a UnicodeDecodeError
"""
class A(str):
def __repr__(self):
return '\xff'
expl = callequal(A(), '1')
assert expl
def test_format_nonascii_explanation(self):
assert util.format_explanation('λ')
def test_mojibake(self):
# issue 429
left = 'e'
right = '\xc3\xa9'
if not isinstance(left, py.builtin.bytes):
left = py.builtin.bytes(left, 'utf-8')
right = py.builtin.bytes(right, 'utf-8')
expl = callequal(left, right)
for line in expl:
assert isinstance(line, py.builtin.text)
msg = py.builtin._totext('\n').join(expl)
assert msg
class TestFormatExplanation:
def test_special_chars_full(self, testdir):
# Issue 453, for the bug this would raise IndexError
testdir.makepyfile("""
def test_foo():
assert '\\n}' == ''
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines([
"*AssertionError*",
])
def test_fmt_simple(self):
expl = 'assert foo'
assert util.format_explanation(expl) == 'assert foo'
def test_fmt_where(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'} == 2'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo'])
assert util.format_explanation(expl) == res
def test_fmt_and(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
def test_fmt_where_nested(self):
expl = '\n'.join(['assert 1',
'{1 = foo',
'{foo = bar',
'}',
'} == 2'])
res = '\n'.join(['assert 1 == 2',
' + where 1 = foo',
' + where foo = bar'])
assert util.format_explanation(expl) == res
def test_fmt_newline(self):
expl = '\n'.join(['assert "foo" == "bar"',
'~- foo',
'~+ bar'])
res = '\n'.join(['assert "foo" == "bar"',
' - foo',
' + bar'])
assert util.format_explanation(expl) == res
def test_fmt_newline_escaped(self):
expl = '\n'.join(['assert foo == bar',
'baz'])
res = 'assert foo == bar\\nbaz'
assert util.format_explanation(expl) == res
def test_fmt_newline_before_where(self):
expl = '\n'.join(['the assertion message here',
'>assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['the assertion message here',
'assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
def test_fmt_multi_newline_before_where(self):
expl = '\n'.join(['the assertion',
'~message here',
'>assert 1',
'{1 = foo',
'} == 2',
'{2 = bar',
'}'])
res = '\n'.join(['the assertion',
' message here',
'assert 1 == 2',
' + where 1 = foo',
' + and 2 = bar'])
assert util.format_explanation(expl) == res
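# Editorial note (not part of the original test module): the explanation
# mini-language exercised above uses '{' ... '}' to wrap a "where"
# sub-explanation, '~' to mark a continuation line that is re-indented by one
# space, and '>' to separate a custom assertion message from the rewritten
# assert; util.format_explanation flattens these markers into the
# '+ where' / '+ and' lines checked in the expected results.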
def test_python25_compile_issue257(testdir):
testdir.makepyfile("""
def test_rewritten():
assert 1 == 2
# some comment
""")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines("""
*E*assert 1 == 2*
*1 failed*
""")
def test_rewritten(testdir):
testdir.makepyfile("""
def test_rewritten():
assert "@py_builtins" in globals()
""")
assert testdir.runpytest().ret == 0
def test_reprcompare_notin(mock_config):
detail = plugin.pytest_assertrepr_compare(
mock_config, 'not in', 'foo', 'aaafoobbb')[1:]
assert detail == ["'foo' is contained here:", ' aaafoobbb', '? +++']
def test_pytest_assertrepr_compare_integration(testdir):
testdir.makepyfile("""
def test_hello():
x = set(range(100))
y = x.copy()
y.remove(50)
assert x == y
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello():*",
"*assert x == y*",
"*E*Extra items*left*",
"*E*50*",
])
def test_sequence_comparison_uses_repr(testdir):
testdir.makepyfile("""
def test_hello():
x = set("hello x")
y = set("hello y")
assert x == y
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello():*",
"*assert x == y*",
"*E*Extra items*left*",
"*E*'x'*",
"*E*Extra items*right*",
"*E*'y'*",
])
def test_assert_compare_truncate_longmessage(monkeypatch, testdir):
testdir.makepyfile(r"""
def test_long():
a = list(range(200))
b = a[::2]
a = '\n'.join(map(str, a))
b = '\n'.join(map(str, b))
assert a == b
""")
monkeypatch.delenv('CI', raising=False)
result = testdir.runpytest()
# without -vv, truncate the message showing a few diff lines only
result.stdout.fnmatch_lines([
"*- 1",
"*- 3",
"*- 5",
"*- 7",
"*truncated (191 more lines)*use*-vv*",
])
result = testdir.runpytest('-vv')
result.stdout.fnmatch_lines([
"*- 197",
])
monkeypatch.setenv('CI', '1')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*- 197",
])
def test_assertrepr_loaded_per_dir(testdir):
testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
a = testdir.mkdir('a')
a_test = a.join('test_a.py')
a_test.write('def test_a(): assert 1 == 2')
a_conftest = a.join('conftest.py')
a_conftest.write('def pytest_assertrepr_compare(): return ["summary a"]')
b = testdir.mkdir('b')
b_test = b.join('test_b.py')
b_test.write('def test_b(): assert 1 == 2')
b_conftest = b.join('conftest.py')
b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]')
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*def test_base():*',
'*E*assert 1 == 2*',
'*def test_a():*',
'*E*assert summary a*',
'*def test_b():*',
'*E*assert summary b*'])
def test_assertion_options(testdir):
testdir.makepyfile("""
def test_hello():
x = 3
assert x == 4
""")
result = testdir.runpytest()
assert "3 == 4" in result.stdout.str()
off_options = (("--no-assert",),
("--nomagic",),
("--no-assert", "--nomagic"),
("--assert=plain",),
("--assert=plain", "--no-assert"),
("--assert=plain", "--nomagic"),
("--assert=plain", "--no-assert", "--nomagic"))
for opt in off_options:
result = testdir.runpytest_subprocess(*opt)
assert "3 == 4" not in result.stdout.str()
def test_old_assert_mode(testdir):
testdir.makepyfile("""
def test_in_old_mode():
assert "@py_builtins" not in globals()
""")
result = testdir.runpytest_subprocess("--assert=reinterp")
assert result.ret == 0
def test_triple_quoted_string_issue113(testdir):
testdir.makepyfile("""
def test_hello():
assert "" == '''
'''""")
result = testdir.runpytest("--fulltrace")
result.stdout.fnmatch_lines([
"*1 failed*",
])
assert 'SyntaxError' not in result.stdout.str()
def test_traceback_failure(testdir):
p1 = testdir.makepyfile("""
def g():
return 2
def f(x):
assert x == g()
def test_onefails():
f(3)
""")
result = testdir.runpytest(p1, "--tb=long")
result.stdout.fnmatch_lines([
"*test_traceback_failure.py F",
"====* FAILURES *====",
"____*____",
"",
" def test_onefails():",
"> f(3)",
"",
"*test_*.py:6: ",
"_ _ _ *",
#"",
" def f(x):",
"> assert x == g()",
"E assert 3 == 2",
"E + where 2 = g()",
"",
"*test_traceback_failure.py:4: AssertionError"
])
result = testdir.runpytest(p1) # "auto"
result.stdout.fnmatch_lines([
"*test_traceback_failure.py F",
"====* FAILURES *====",
"____*____",
"",
" def test_onefails():",
"> f(3)",
"",
"*test_*.py:6: ",
"",
" def f(x):",
"> assert x == g()",
"E assert 3 == 2",
"E + where 2 = g()",
"",
"*test_traceback_failure.py:4: AssertionError"
])
@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
testdir.makepyfile("")
result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
result.stderr.fnmatch_lines([
"*WARNING*assert statements are not executed*",
])
result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert")
result.stderr.fnmatch_lines([
"*WARNING*assert statements are not executed*",
])
def test_recursion_source_decode(testdir):
testdir.makepyfile("""
def test_something():
pass
""")
testdir.makeini("""
[pytest]
python_files = *.py
""")
result = testdir.runpytest("--collect-only")
result.stdout.fnmatch_lines("""
<Module*>
""")
def test_AssertionError_message(testdir):
testdir.makepyfile("""
def test_hello():
x,y = 1,2
assert 0, (x,y)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*def test_hello*
*assert 0, (x,y)*
*AssertionError: (1, 2)*
""")
@pytest.mark.skipif(PY3, reason='This bug does not exist on PY3')
def test_set_with_unsortable_elements():
# issue #718
class UnsortableKey(object):
def __init__(self, name):
self.name = name
def __lt__(self, other):
raise RuntimeError()
def __repr__(self):
return 'repr({0})'.format(self.name)
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
expl = callequal(left_set, right_set, verbose=True)
# skip first line because it contains the "construction" of the set, which does not have a guaranteed order
expl = expl[1:]
dedent = textwrap.dedent("""
Extra items in the left set:
repr(1)
Extra items in the right set:
repr(3)
Full diff (fallback to calling repr on each item):
- repr(1)
repr(2)
+ repr(3)
""").strip()
assert '\n'.join(expl) == dedent
|
mpl-2.0
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/gpuarray/opt.py
|
1
|
39678
|
from __future__ import absolute_import, print_function, division
import copy
import numpy
import logging
import pdb
from six.moves import xrange
import theano
from theano import tensor, scalar, gof, config
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,
SequenceDB, Optimizer, toolbox)
from theano.gof.optdb import LocalGroupDB
from theano.ifelse import IfElse
from theano.scalar.basic import Scalar, Pow, Cast
from theano.scan_module import scan_utils, scan_op, scan_opt
from theano.tensor.nnet.conv import ConvOp
from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter
from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs)
from theano.tests.breakpoint import PdbBreakpoint
from .type import (GpuArrayType, GpuArrayConstant, get_context,
ContextNotDefined)
from .basic_ops import (as_gpuarray_variable, infer_context_name,
host_from_gpu, GpuToGpu,
HostFromGpu, GpuFromHost,
GpuSplit, GpuContiguous, gpu_contiguous,
GpuAlloc, GpuAllocEmpty, GpuReshape,
GpuEye, gpu_join, GpuJoin)
from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,
gpugemm_no_inplace, gpugemm_inplace, gpugemmbatch_no_inplace,
gpugemv_no_inplace, gpugemv_inplace)
from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,
gpu_sparse_block_outer, gpu_sparse_block_outer_inplace,
gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)
from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,
gpu_crossentropy_softmax_argmax_1hot_with_bias,
gpu_softmax_with_bias, gpu_softmax)
from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,
GpuCAReduceCPY)
from .subtensor import (GpuIncSubtensor, GpuSubtensor,
GpuAdvancedSubtensor1,
GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)
from .opt_util import alpha_merge, output_merge
_logger = logging.getLogger("theano.gpuarray.opt")
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
# Don't register this right now
conv_groupopt = LocalGroupDB()
conv_groupopt.__name__ = "gpua_conv_opts"
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
'fast_compile', 'fast_run', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
'fast_compile', 'fast_run', 'gpuarray')
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register('gpuarray_opt', gpu_seqopt,
optdb.__position__.get('add_destroy_handler', 49.5) - 1,
'gpuarray')
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
return local_opt
return f
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop('name')) or local_opt.__name__
optdb.register(
name, TopoOptimizer(
local_opt, failure_callback=TopoOptimizer.warn_inplace),
60, 'fast_run', 'inplace', 'gpuarray', *tags)
return local_opt
return f
register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)
register_opt(final_opt=True, name='gpua_constant_folding')(
tensor.opt.constant_folding)
gpu_optimizer.register('local_remove_all_assert',
theano.tensor.opt.local_remove_all_assert,
'unsafe')
def safe_to_gpu(x, ctx_name):
if isinstance(x.type, tensor.TensorType):
return GpuFromHost(ctx_name)(x)
else:
return x
def safe_to_cpu(x):
if isinstance(x.type, GpuArrayType):
return host_from_gpu(x)
else:
return x
def op_lifter(OP, cuda_only=False):
"""
OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)
"""
def f(maker):
def local_opt(node):
if type(node.op) in OP:
# Either one of our inputs is on the gpu or
# all of our clients are on the gpu
replace = False
# TODO: Maybe set context_name with infer_context_name()?
context_name = None
# We replace if any input is a host_from_gpu
for i in node.inputs:
if i.owner and i.owner.op == host_from_gpu:
context_name = i.owner.inputs[0].type.context_name
replace = True
break
if not replace:
# We replace if *all* clients are on the GPU
clients = [c for o in node.outputs for c in o.clients]
replace = len(clients) != 0
for c, idx in clients:
if (c == 'output' or
not isinstance(c.op, GpuFromHost)):
replace = False
# TODO: check that the clients want the same context?
if replace:
# All clients are GpuFromHost and we have at least one
context_name = clients[0][0].op.context_name
# Check if we should replace
if (not replace or
(cuda_only and
get_context(context_name).kind != b'cuda')):
return False
# tag the inputs with the context in case
# the context was derived from the outputs
for i in node.inputs:
i.tag.context_name = context_name
new_op = maker(node, context_name)
# This is needed as sometimes new_op inherits from OP.
if new_op and new_op != node.op:
if isinstance(new_op, theano.Op):
return [safe_to_cpu(o) for o in
new_op(*node.inputs, return_list=True)]
elif isinstance(new_op, (tuple, list)):
return [safe_to_cpu(o) for o in new_op]
else: # suppose it is a variable on the GPU
return [host_from_gpu(new_op)]
return False
local_opt.__name__ = maker.__name__
return local_optimizer(OP)(local_opt)
return f
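# Editorial sketch (not part of the original module): op_lifter is used as a
# decorator factory together with register_opt, e.g.
#
#   @register_opt('fast_compile')
#   @op_lifter([tensor.Alloc])
#   def local_gpuaalloc(node, context_name):
#       return GpuAlloc(context_name)(*node.inputs)
#
# The decorated "maker" returns a GPU op, a list of outputs or a GPU variable;
# op_lifter wraps whatever it returns with safe_to_cpu/host_from_gpu so the
# replaced node keeps producing host values.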
class InputToGpuOptimizer(Optimizer):
"""
Transfer the input to the gpu to start the rolling wave.
"""
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
for input in fgraph.inputs:
if isinstance(input.type, GpuArrayType):
continue
# If all clients are outputs or transfers don't do anything.
if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)
for cl in input.clients)):
continue
target = getattr(input.tag, 'target', None)
if target == 'cpu':
continue
try:
new_input = host_from_gpu(GpuFromHost(target)(input))
fgraph.replace_validate(input, new_input,
"InputToGpuOptimizer")
except TypeError:
# This could fail if the inputs are not TensorTypes
pass
except ContextNotDefined:
if hasattr(input.tag, 'target'):
raise
# If there is no context tag and no default context
# then it stays on the CPU
pass
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, HostFromGpu)):
other = node.inputs[0].owner.inputs[0]
if node.op.context_name == other.type.context_name:
return [other]
else:
return [GpuToGpu(node.op.context_name)(other)]
# ? -> gpua -> host
elif (isinstance(node.op, HostFromGpu) and
node.inputs[0].owner):
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [n2.inputs[0]]
# gpub ->
if isinstance(n2.op, GpuToGpu):
return [host_from_gpu(n2.inputs[0])]
# ? -> gpua -> gpub
elif isinstance(node.op, GpuToGpu):
# Transfer within same context
if node.inputs[0].type.context_name == node.op.context_name:
return [node.inputs[0]]
if node.inputs[0].owner:
n2 = node.inputs[0].owner
# host ->
if isinstance(n2.op, GpuFromHost):
return [as_gpuarray_variable(n2.inputs[0],
node.op.context_name)]
# gpuc ->
if isinstance(n2.op, GpuToGpu):
if node.op.context_name == n2.inputs[0].type.context_name:
return [n2.inputs[0]]
else:
return [node.op(n2.inputs[0])]
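# Editorial sketch of the rewrites above (x lives on context a; a and b are
# hypothetical context names, y is a host variable):
#   GpuFromHost(a)(host_from_gpu(x))  -> x
#   GpuFromHost(b)(host_from_gpu(x))  -> GpuToGpu(b)(x)
#   host_from_gpu(GpuFromHost(a)(y))  -> y
#   host_from_gpu(GpuToGpu(b)(x))     -> host_from_gpu(x)
#   GpuToGpu(a)(x)                    -> x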
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
tensor.opt.constant_folding,
'fast_compile', 'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
local_cut_gpu_transfers,
'fast_compile', 'fast_run', 'gpuarray')
@register_opt('fast_compile')
@local_optimizer([tensor.Alloc])
def local_gpuaalloc2(node):
"""
Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...)
Moves an alloc that is an input to join to the gpu.
"""
try:
get_context(None)
except ContextNotDefined:
# If there is no default context then we do not perform the move here.
return
if (isinstance(node.op, tensor.Alloc) and
all(c != 'output' and
c.op == tensor.join and
all(i.owner and
i.owner.op in [host_from_gpu, tensor.alloc]
for i in c.inputs[1:])
for c, idx in node.outputs[0].clients)):
return [host_from_gpu(GpuAlloc(None)(*node.inputs))]
@register_opt('fast_compile')
@op_lifter([tensor.Alloc])
def local_gpuaalloc(node, context_name):
return GpuAlloc(context_name)(*node.inputs)
@register_opt('fast_compile')
@op_lifter([tensor.AllocEmpty])
def local_gpuaallocempty(node, context_name):
# We use _props_dict() to make sure that the GPU op knows all the
# CPU op props.
return GpuAllocEmpty(context_name=context_name,
**node.op._props_dict())(*node.inputs)
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
inp = node.inputs[0]
if (isinstance(inp, GpuArrayConstant) and
inp.data.size == 1 and
(numpy.asarray(inp.data) == 0).all()):
new_op = GpuAlloc(node.op.context_name, memset_0=True)
return [new_op(*node.inputs)]
# Don't register by default.
@gof.local_optimizer([GpuAllocEmpty])
def local_gpua_alloc_empty_to_zeros(node):
if isinstance(node.op, GpuAllocEmpty):
context_name = infer_context_name(*node.inputs)
z = numpy.asarray(0, dtype=node.outputs[0].dtype)
return [GpuAlloc()(as_gpuarray_variable(z, context_name),
*node.inputs)]
optdb.register('local_gpua_alloc_empty_to_zeros',
theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),
# After move to gpu and merge2, before inplace.
49.3,
'alloc_empty_to_zeros',)
@register_opt()
@local_optimizer([GpuContiguous])
def local_gpu_contiguous_gpu_contiguous(node):
"""
gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)
"""
if isinstance(node.op, GpuContiguous):
inp = node.inputs[0]
if inp.owner and isinstance(inp.owner.op, GpuContiguous):
return [inp]
@register_opt('fast_compile')
@op_lifter([tensor.extra_ops.CpuContiguous])
def local_gpu_contiguous(node, context_name):
return gpu_contiguous
@register_opt('fast_compile')
@op_lifter([tensor.Reshape])
def local_gpureshape(node, context_name):
op = node.op
name = op.name
if name:
name = 'Gpu' + name
res = GpuReshape(op.ndim, op.name)
return res
@register_opt('fast_compile')
@op_lifter([tensor.Rebroadcast])
def local_gpu_rebroadcast(node, context_name):
return node.op(as_gpuarray_variable(node.inputs[0], context_name))
@register_opt('fast_compile')
@op_lifter([tensor.Flatten])
def local_gpuflatten(node, context_name):
op = node.op
shp = []
if op.outdim != 1:
shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
shp += [-1]
res = GpuReshape(op.outdim, None)
o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
return o
@register_opt('fast_compile')
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node, context_name):
op = node.op
scal_op = op.scalar_op
name = op.name
if name:
name = 'Gpu' + name
if len(node.outputs) > 1:
return
res = GpuElemwise(scal_op, name=name,
inplace_pattern=copy.copy(op.inplace_pattern),
nfunc_spec=op.nfunc_spec)
# If the elemwise operation is a pow, casts might be required on the
# inputs and or outputs because only the (float, float)->float and
# (double, double)->double cases are implemented at the moment.
if isinstance(op.scalar_op, Pow):
# Only transfer the computation on the gpu if the output dtype is
# floating point. Else, give up on the transfer to the gpu.
out_dtype = node.outputs[0].dtype
if out_dtype not in ['float16', 'float32', 'float64']:
return
# Transfer the inputs on the GPU and cast them to the right dtype.
new_inputs = []
for inp in node.inputs:
if inp.dtype != out_dtype:
gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))
new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))
else:
new_inputs.append(as_gpuarray_variable(inp, context_name))
# Perform the exponent on the gpu and transfer the output back to the
# cpu.
gpu_output = res(*new_inputs)
cpu_output = host_from_gpu(gpu_output)
return [cpu_output]
else:
return res
def max_inputs_to_GpuElemwise(node):
ptr_size = 8
int_size = 4
# we take the limit from CUDA for now
argument_limit = 232
ndim = node.inputs[0].type.ndim
# number of elements and shape
size_param_mandatory = (int_size * (ndim + 1)) + \
(ptr_size + int_size * ndim) * len(node.outputs)
nb_bytes_avail = argument_limit - size_param_mandatory
nb_bytes_per_input = ptr_size + ndim * int_size
max_nb_inputs = nb_bytes_avail // nb_bytes_per_input
return max_nb_inputs
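# Editorial worked example (assumed values, not in the original source): for a
# node with one output and inputs of ndim == 2,
#   size_param_mandatory = 4 * (2 + 1) + (8 + 4 * 2) * 1 = 28 bytes
#   nb_bytes_avail       = 232 - 28 = 204 bytes
#   nb_bytes_per_input   = 8 + 2 * 4 = 16 bytes
# so max_inputs_to_GpuElemwise returns 204 // 16 = 12 inputs.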
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
GpuElemwise,
max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
@register_opt('fast_compile')
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node, context_name):
return GpuDimShuffle(node.op.input_broadcastable,
node.op.new_order)
@register_opt('fast_compile')
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node, context_name):
if isinstance(node.inputs[0].type, GpuArrayType):
return
inp = [as_gpuarray_variable(node.inputs[0], context_name)]
inp += node.inputs[1:]
return tensor.specify_shape(*inp)
@register_opt('fast_compile')
@op_lifter([theano.compile.ops.Shape])
def local_gpua_shape(node, context_name):
# op_lifter will call this opt too frequently as the output is
# always on the CPU.
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [as_gpuarray_variable(node.inputs[0], context_name).shape]
def gpu_print_wrapper(op, cnda):
op.old_op.global_fn(op.old_op, numpy.asarray(cnda))
@register_opt('fast_compile')
@op_lifter([tensor.printing.Print])
def local_gpu_print_op(node, context_name):
x, = node.inputs
gpu_x = as_gpuarray_variable(x, context_name=context_name)
new_op = node.op.__class__(global_fn=gpu_print_wrapper)
new_op.old_op = node.op
return new_op(gpu_x)
@register_opt('fast_compile')
@local_optimizer([PdbBreakpoint])
def local_gpu_pdbbreakpoint_op(node):
if isinstance(node.op, PdbBreakpoint):
old_inputs = node.inputs
old_outputs = node.outputs
new_inputs = node.inputs[:1]
input_transfered = []
# Go through the monitored variables, transferring to the GPU only those
# for which the input comes from the GPU or whose output will be
# transferred to the GPU.
nb_monitored_vars = len(node.outputs)
for i in range(nb_monitored_vars):
inp = old_inputs[i + 1]
out = old_outputs[i]
input_is_from_gpu = (inp.owner and
isinstance(inp.owner.op, HostFromGpu))
output_goes_to_gpu = False
for c in out.clients:
if c == 'output':
continue
if isinstance(c[0].op, GpuFromHost):
output_goes_to_gpu = True
context_name = c[0].op.context_name
break
if input_is_from_gpu:
# The op should be applied on the GPU version of the input
new_inputs.append(inp.owner.inputs[0])
input_transfered.append(True)
elif output_goes_to_gpu:
# The input should be transferred to the gpu
new_inputs.append(as_gpuarray_variable(inp, context_name))
input_transfered.append(True)
else:
# No transfer is required.
new_inputs.append(inp)
input_transfered.append(False)
# Only continue the optimization if at least one input has been
# transferred to the gpu
if not any(input_transfered):
return False
# Apply the op on the new inputs
new_op_outputs = node.op(*new_inputs, return_list=True)
# Propagate the transfer to the gpu through the outputs that require
# it
new_outputs = []
for i in range(len(new_op_outputs)):
if input_transfered[i]:
new_outputs.append(host_from_gpu(new_op_outputs[i]))
else:
new_outputs.append(new_op_outputs[i])
return new_outputs
return False
@register_opt('fast_compile')
@op_lifter([IfElse])
def local_gpua_lazy_ifelse(node, context_name):
if node.op.gpu:
return
c = node.inputs[0]
inps = []
for v in node.inputs[1:]:
if isinstance(v.type, (tensor.TensorType, GpuArrayType)):
inps.append(as_gpuarray_variable(v, context_name))
else:
inps.append(v)
return IfElse(node.op.n_outs, gpu=True)(c, *inps, return_list=True)
@register_opt('fast_compile')
@op_lifter([tensor.Join])
def local_gpua_join(node, context_name):
return gpu_join
@register_opt('fast_compile')
@local_optimizer([GpuJoin])
def local_gpuajoin_1(node):
# join of a single element
if (isinstance(node.op, GpuJoin) and
len(node.inputs) == 2):
return [node.inputs[1]]
@register_opt('fast_compile')
@op_lifter([tensor.Split])
def local_gpua_split(node, context_name):
return GpuSplit(node.op.len_splits)
@register_opt('fast_compile')
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node, context_name):
x = node.inputs[0]
if (x.owner and isinstance(x.owner.op, HostFromGpu)):
gpu_x = x.owner.inputs[0]
if (gpu_x.owner and
isinstance(gpu_x.owner.op, GpuFromHost) and
# And it is a shared var or an input of the graph.
not gpu_x.owner.inputs[0].owner):
if len(x.clients) == 1:
if any([n == 'output' or any([isinstance(v.type, GpuArrayType)
for v in n.inputs + n.outputs])
for n, _ in node.outputs[0].clients]):
return
else:
return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))]
return GpuSubtensor(node.op.idx_list)
@register_opt('fast_compile')
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node, context_name):
op = GpuIncSubtensor(node.op.idx_list, node.op.inplace,
node.op.set_instead_of_inc,
node.op.destroyhandler_tolerate_aliased)
ret = op(*node.inputs)
val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True)
ret.tag.nan_guard_mode_check = val
return ret
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedSubtensor1])
def local_gpua_advanced_subtensor(node, context_name):
return GpuAdvancedSubtensor1()
@register_opt('fast_compile')
@op_lifter([tensor.AdvancedIncSubtensor1])
def local_gpua_advanced_incsubtensor(node, context_name):
context = get_context(context_name)
# This is disabled on non-cuda contexts
if context.kind != b'cuda':
return None
x, y, ilist = node.inputs
# Gpu Ops needs both inputs to have the same dtype
if (x.type.dtype != y.type.dtype):
dtype = scalar.upcast(x.type.dtype, y.type.dtype)
if x.type.dtype != dtype:
x = tensor.cast(x, dtype)
if y.type.dtype != dtype:
y = tensor.cast(y, dtype)
set_instead_of_inc = node.op.set_instead_of_inc
compute_capability = int(context.bin_id[-2])
if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2):
return GpuAdvancedIncSubtensor1(
set_instead_of_inc=set_instead_of_inc)
else:
return GpuAdvancedIncSubtensor1_dev20(
set_instead_of_inc=set_instead_of_inc)
@register_inplace()
@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])
def local_advincsub1_gpua_inplace(node):
if isinstance(node.op, (GpuAdvancedIncSubtensor1,
GpuAdvancedIncSubtensor1_dev20)):
if not node.op.inplace:
return [node.op.clone_inplace()(*node.inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])
def local_gpua_careduce(node, context_name):
if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul,
scalar.Maximum, scalar.Minimum)):
ctx = get_context(context_name)
if ctx.kind == b'opencl':
op = GpuCAReduceCPY
if node.op.scalar_op not in [scalar.add, scalar.mul]:
# We don't yet support all reductions with the cpy code.
return
elif ctx.kind == b'cuda':
op = GpuCAReduceCuda
else:
return False
x, = node.inputs
greduce = op(
node.op.scalar_op, axis=node.op.axis,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
gvar = greduce(x)
# We need make_node to have been called, otherwise the mask can
# be None
if (op is GpuCAReduceCPY or
gvar.owner.op.supports_c_code([
as_gpuarray_variable(x, context_name)])):
return greduce
else:
# Try to make a simpler pattern based on reshaping
# The principle is that if two adjacent dimensions have
# the same value in the reduce_mask, then we can reshape
# to make them a single dimension, do the reduction, and
# then reshape to get them back.
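# Editorial worked example (hypothetical shapes): for x with ndim == 4 and
# axis == (1, 2), reduce_mask is [0, 1, 1, 0]; the two adjacent reduced
# dimensions are merged so new_in_shp becomes [d0, d1 * d2, d3],
# new_mask becomes [0, 1, 0] and new_axis becomes [1], i.e. the reduction
# runs on the reshaped 3d tensor and the result is reshaped back afterwards.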
if node.op.axis is None:
reduce_mask = [1] * x.type.ndim
else:
reduce_mask = [0] * x.type.ndim
for a in node.op.axis:
assert reduce_mask[a] == 0
reduce_mask[a] = 1
new_in_shp = [shape_i(x, 0)]
new_mask = [reduce_mask[0]]
for i in xrange(1, x.type.ndim):
if reduce_mask[i] == reduce_mask[i - 1]:
new_in_shp[-1] *= shape_i(x, i)
else:
new_mask.append(reduce_mask[i])
new_in_shp.append(shape_i(x, i))
new_axis = []
for idx, m in enumerate(new_mask):
if m == 1:
new_axis.append(idx)
greduce = op(
node.op.scalar_op,
axis=new_axis, reduce_mask=new_mask,
dtype=getattr(node.op, 'dtype', None),
acc_dtype=getattr(node.op, 'acc_dtype', None))
reshaped_x = x.reshape(tensor.stack(new_in_shp))
gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)
gvar = greduce(gpu_reshaped_x)
# We need make_node to have been called, otherwise the mask can
# be None
reshaped_gpu_inputs = [gpu_reshaped_x]
if greduce.supports_c_code(reshaped_gpu_inputs):
reduce_reshaped_x = host_from_gpu(
greduce(gpu_reshaped_x))
if reduce_reshaped_x.ndim != node.outputs[0].ndim:
out_shp = []
for i in range(x.ndim):
if i not in node.op.axis:
out_shp.append(shape_i(x, i))
unreshaped_reduce = reduce_reshaped_x.reshape(
tensor.stack(out_shp))
else:
unreshaped_reduce = reduce_reshaped_x
return [unreshaped_reduce]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])
def local_gpua_gemv(node, context_name):
if node.op.inplace:
return gpugemv_inplace
else:
return gpugemv_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node, context_name):
if node.op.inplace:
return gpugemm_inplace
else:
return gpugemm_no_inplace
@register_opt('fast_compile')
@op_lifter([tensor.blas.BatchedDot])
def local_gpua_gemmbatch(node, context_name):
a, b = node.inputs
c = tensor.AllocEmpty(a.dtype)(a.shape[0], a.shape[1], b.shape[2])
return gpugemmbatch_no_inplace(c, 1.0, a, b, 0.0)
@register_opt('fast_compile')
@op_lifter([tensor.basic.Dot])
def local_gpua_hgemm(node, context_name):
from theano.sandbox.cuda import nvcc_compiler
if nvcc_compiler.nvcc_version < '7.5':
_logger.warning("Not performing dot of float16 on the GPU since "
"cuda 7.5 is not available. Updating could speed up "
"your code.")
return
A = node.inputs[0]
B = node.inputs[1]
if (A.ndim == 2 and B.ndim == 2 and
A.dtype == 'float16' and B.dtype == 'float16'):
fgraph = node.inputs[0].fgraph
C = GpuAllocEmpty(dtype='float16', context_name=context_name)(
shape_i(A, 0, fgraph),
shape_i(B, 1, fgraph))
return gpugemm_no_inplace(C, 1.0, A, B, 0.0)
@register_opt()
@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)
def local_gpuagemm_alpha_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemm_output_merge(node, *inputs):
return [gpugemm_no_inplace(*inputs)]
@register_opt()
@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)
def local_gpuagemmbatch_alpha_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt()
@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)
def local_gpuagemmbatch_output_merge(node, *inputs):
return [gpugemmbatch_no_inplace(*inputs)]
@register_opt('fast_compile')
@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])
def local_gpua_ger(node, context_name):
return GpuGer(inplace=node.op.destructive)
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node, context_name):
return gpu_dot22
@register_opt('fast_compile')
@op_lifter([tensor.blas.Dot22Scalar])
def local_gpua_dot22scalar(node, context_name):
x, y, a = node.inputs
x = as_gpuarray_variable(x, context_name)
y = as_gpuarray_variable(y, context_name)
z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])
return [gpugemm_no_inplace(z, a, x, y, 0)]
@register_opt('fast_compile')
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node, context_name):
return GpuEye(dtype=node.op.dtype, context_name=context_name)
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True)
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name):
return gpu_crossentropy_softmax_argmax_1hot_with_bias
@register_opt('fast_compile')
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True)
def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name):
return gpu_crossentropy_softmax_1hot_with_bias_dx
@register_opt('fast_compile')
@op_lifter([tensor.nnet.Softmax], cuda_only=True)
def local_gpua_softmax(node, context_name):
return gpu_softmax
@register_opt('fast_compile')
@op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True)
def local_gpua_softmaxwithbias(node, context_name):
return gpu_softmax_with_bias
@register_opt('fast_compile')
@op_lifter([theano.tensor.opt.Assert])
def local_assert(node, context_name):
# Check if input nodes are already on the GPU
if isinstance(node.inputs[0].type, GpuArrayType):
return
return [host_from_gpu(node.op(as_gpuarray_variable(node.inputs[0],
context_name),
*node.inputs[1:]))]
@register_opt('fast_compile')
@op_lifter([ConvOp])
def local_error_convop(node, context_name):
assert False, """
ConvOp does not work with the gpuarray backend.
Use the new convolution interface to have GPU convolution working:
theano.tensor.nnet.conv2d()
"""
@register_opt('fast_compile')
@op_lifter([SparseBlockGemv])
def local_lift_sparseblockgemv(node, context_name):
if node.op.inplace:
return gpu_sparse_block_gemv_inplace
else:
return gpu_sparse_block_gemv
@register_opt('fast_compile')
@op_lifter([SparseBlockOuter])
def local_lift_sparseblockouter(node, context_name):
if node.op.inplace:
return gpu_sparse_block_outer_inplace
else:
return gpu_sparse_block_outer
@register_inplace()
@local_optimizer([GpuSparseBlockGemv], inplace=True)
def local_inplace_sparseblockgemv(node):
if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:
return [gpu_sparse_block_gemv_inplace(*node.inputs)]
@register_inplace()
@local_optimizer([GpuSparseBlockOuter], inplace=True)
def local_inplace_sparseblockouter(node):
if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:
return [GpuSparseBlockOuter(inplace=True)(*node.inputs)]
# This deals with any abstract convs that have a transfer somewhere
@register_opt('fast_compile')
@op_lifter([AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs])
def local_lift_abstractconv2d(node, context_name):
if isinstance(node.outputs[0].type, GpuArrayType):
# Don't handle this node here, it's already on the GPU.
return
inps = list(node.inputs)
inps[0] = as_gpuarray_variable(node.inputs[0],
context_name=context_name)
inps[1] = as_gpuarray_variable(node.inputs[1],
context_name=context_name)
return [node.op(*inps)]
# Register this here so that it goes after the abstract lifting
register_opt('fast_compile')(conv_groupopt)
@register_opt("low_memory")
@local_optimizer([GpuCAReduceCuda])
def local_gpu_elemwise_careduce(node):
"""
Merge some GpuCAReduceCuda and GpuElemwise.
"""
if (isinstance(node.op, GpuCAReduceCuda) and
node.op.pre_scalar_op is None and
node.inputs[0].owner and
isinstance(node.inputs[0].owner.op, GpuElemwise) and
# The Op supports all scalar ops with 1 input. We don't
# automatically add more cases, as some (e.g. trigonometric
# operations combined with certain reduction patterns) would
# probably result in a slowdown.
isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)):
op = node.op
inp = node.inputs[0].owner.inputs[0]
return [GpuCAReduceCuda(scalar_op=op.scalar_op,
axis=op.axis,
reduce_mask=op.reduce_mask,
pre_scalar_op=scalar.basic.sqr)(inp)]
@local_optimizer(None)
def local_assert_no_cpu_op(node):
if (all([var.owner and isinstance(var.owner.op, HostFromGpu)
for var in node.inputs]) and
any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]
for var in node.outputs])):
if config.assert_no_cpu_op == "warn":
_logger.warning(("CPU Op %s is detected in the computation "
"graph") % node)
elif config.assert_no_cpu_op == "raise":
raise AssertionError("The Op %s is on CPU." % node)
elif config.assert_no_cpu_op == "pdb":
pdb.set_trace()
# Register the local_assert_no_cpu_op:
assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,
name='assert_no_cpu_op')
# 49.2 is after device specialization & fusion optimizations for last transfers
optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,
'assert_no_cpu_op')
def tensor_to_gpu(x, context_name):
if isinstance(x.type, tensor.TensorType):
y = GpuArrayType(broadcastable=x.type.broadcastable,
context_name=context_name,
dtype=x.type.dtype)()
if x.name:
y.name = x.name + '[Gpua]'
return y
else:
return x
def gpu_safe_new(x, tag=''):
"""
Internal function that constructs a new variable from x with the same
type, but with a different name (old name + tag). This function is used
by the gradient or the R-op to construct new variables for the inputs of
the inner graph such that there is no interference between the original
graph and the newly constructed graph.
"""
if hasattr(x, 'name') and x.name is not None:
nw_name = x.name + tag
else:
nw_name = None
if isinstance(x, theano.Constant):
return x.clone()
nw_x = x.type()
nw_x.name = nw_name
return nw_x
def gpu_reconstruct_graph(inputs, outputs, tag=None):
"""
A different interface to clone that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those (in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x, tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] = nw_x
nw_outputs = scan_utils.clone(outputs, replace=givens)
return (nw_inputs, nw_outputs)
@register_opt('scan', 'fast_compile')
@op_lifter([scan_op.Scan])
def local_scan_to_gpua(node, context_name):
info = copy.deepcopy(node.op.info)
if info.get('gpua', False):
return
info['gpua'] = True
nw_ins = [node.inputs[0]]
e = (1 +
node.op.n_seqs +
node.op.n_mit_mot +
node.op.n_mit_sot +
node.op.n_sit_sot +
node.op.n_shared_outs)
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]]
b = e
e = e + node.op.n_nit_sot
nw_ins += node.inputs[b:e]
nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]]
scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs]
# The inner output corresponding to the looping condition should not be
# moved to the gpu
if node.op.info['as_while']:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]]
scan_outs += [node.op.outputs[-1]]
else:
scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs]
scan_outs = scan_utils.clone(
scan_outs,
replace=list(zip(node.op.inputs,
(safe_to_cpu(x) for x in scan_ins))))
# We need to construct the hash here, because scan
# __init__ does not know about the gpu and can not
# handle graphs with inputs being on the gpu
tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)
local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)
_cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])
info['gpu_hash'] = hash(_cmodule_key)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
nw_op = scan_op.Scan(scan_ins, scan_outs, info,
typeConstructor=typebuild).make_node(*nw_ins)
return nw_op.outputs
def _scan_type_infer(node):
context_name = infer_context_name(*node.inputs)
def typebuild(dtype, broadcastable, context_name=context_name):
return GpuArrayType(dtype=dtype, broadcastable=broadcastable,
context_name=context_name)
return typebuild
# Do not register in fast_run or fast_compile.
# It will be added to fast_run if the GPU is enabled.
optdb.register('gpua_scanOp_make_inplace',
scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,
gpua_flag=True),
75,
'gpuarray',
'inplace',
'scan')
|
bsd-3-clause
|
phil65/script.tvshow.nextaired
|
dateutil/rrule.py
|
254
|
40402
|
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError, "Can't create weekday with n == 0"
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
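# Editorial note (not in the original module): weekday instances are callable
# to attach an ordinal, e.g. FR(-1) stands for "the last Friday" and
# repr(FR(-1)) == 'FR(-1)', while plain FR has no ordinal and repr(FR) == 'FR'.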
class rrulebase:
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(gen.next())
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxint,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = gen.next()
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penality.
def count(self):
if self._len is None:
for x in self: pass
return self._len
def before(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
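# Editorial note (not in the original module): before(dt) returns the last
# occurrence strictly before dt (or <= dt when inc=True), after(dt) the first
# occurrence strictly after dt (or >= dt when inc=True), and between(a, b)
# the occurrences in the open interval (a, b) (closed when inc=True).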
class rrule(rrulebase):
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
rrulebase.__init__(self, cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif type(wkst) is int:
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif type(bysetpos) is int:
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if not (byweekno or byyearday or bymonthday or
byweekday is not None or byeaster is not None):
if freq == YEARLY:
if not bymonth:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if not bymonth:
self._bymonth = None
elif type(bymonth) is int:
self._bymonth = (bymonth,)
else:
self._bymonth = tuple(bymonth)
# byyearday
if not byyearday:
self._byyearday = None
elif type(byyearday) is int:
self._byyearday = (byyearday,)
else:
self._byyearday = tuple(byyearday)
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if type(byeaster) is int:
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(byeaster)
else:
self._byeaster = None
# bymonthday
if not bymonthday:
self._bymonthday = ()
self._bynmonthday = ()
elif type(bymonthday) is int:
if bymonthday < 0:
self._bynmonthday = (bymonthday,)
self._bymonthday = ()
else:
self._bymonthday = (bymonthday,)
self._bynmonthday = ()
else:
self._bymonthday = tuple([x for x in bymonthday if x > 0])
self._bynmonthday = tuple([x for x in bymonthday if x < 0])
# byweekno
if byweekno is None:
self._byweekno = None
elif type(byweekno) is int:
self._byweekno = (byweekno,)
else:
self._byweekno = tuple(byweekno)
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
elif type(byweekday) is int:
self._byweekday = (byweekday,)
self._bynweekday = None
elif hasattr(byweekday, "n"):
if not byweekday.n or freq > MONTHLY:
self._byweekday = (byweekday.weekday,)
self._bynweekday = None
else:
self._bynweekday = ((byweekday.weekday, byweekday.n),)
self._byweekday = None
else:
self._byweekday = []
self._bynweekday = []
for wday in byweekday:
if type(wday) is int:
self._byweekday.append(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.append(wday.weekday)
else:
self._bynweekday.append((wday.weekday, wday.n))
self._byweekday = tuple(self._byweekday)
self._bynweekday = tuple(self._bynweekday)
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = (dtstart.hour,)
else:
self._byhour = None
elif type(byhour) is int:
self._byhour = (byhour,)
else:
self._byhour = tuple(byhour)
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = (dtstart.minute,)
else:
self._byminute = None
elif type(byminute) is int:
self._byminute = (byminute,)
else:
self._byminute = tuple(byminute)
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
elif type(bysecond) is int:
self._bysecond = (bysecond,)
else:
self._bysecond = tuple(bysecond)
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY:ii.ydayset,
MONTHLY:ii.mdayset,
WEEKLY:ii.wdayset,
DAILY:ii.ddayset,
HOURLY:ii.ddayset,
MINUTELY:ii.ddayset,
SECONDLY:ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY:ii.htimeset,
MINUTELY:ii.mtimeset,
SECONDLY:ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday
and -ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday
and -ii.nextyearlen+i-ii.yearlen
not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
while True:
hour += interval
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if not byhour or hour in byhour:
break
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
while True:
minute += interval
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
filtered = False
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute)):
break
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
//interval)*interval)
while True:
second += self._interval
div, mod = divmod(second, 60)
if div:
second = mod
minute += div
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
break
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
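# Editorial usage sketch (not in the original module): a rule for "every other
# week on Tuesday and Thursday, 10 occurrences" could be built as
#
#   list(rrule(WEEKLY, interval=2, count=10, byweekday=(TU, TH),
#              dtstart=datetime.datetime(2003, 1, 1)))
#
# which yields 10 datetime.datetime instances starting from the first matching
# Tuesday or Thursday on or after dtstart.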
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365+calendar.isleap(year)
self.nextyearlen = 365+calendar.isleap(year+1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
#no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1,1,1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst)%7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen+
(lyearweekday-rr._wkst)%7)%7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst)%7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and
(month != self.lastmonth or year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday)%7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday)%7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return range(self.yearlen), 0, self.yearlen
def mdayset(self, year, month, day):
set = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
set[i] = i
return set, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
set = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
set[i] = i
i += 1
#if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return set, start, i
def ddayset(self, year, month, day):
set = [None]*self.yearlen
i = datetime.date(year, month, day).toordinal()-self.yearordinal
set[i] = i
return set, i, i+1
def htimeset(self, hour, minute, second):
set = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
set.sort()
return set
def mtimeset(self, hour, minute, second):
set = []
rr = self.rrule
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
set.sort()
return set
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
class _genitem:
def __init__(self, genlist, gen):
try:
self.dt = gen()
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def next(self):
try:
self.dt = self.gen()
except StopIteration:
self.genlist.remove(self)
def __cmp__(self, other):
return cmp(self.dt, other.dt)
def __init__(self, cache=False):
rrulebase.__init__(self, cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
def rrule(self, rrule):
self._rrule.append(rrule)
def rdate(self, rdate):
self._rdate.append(rdate)
def exrule(self, exrule):
self._exrule.append(exrule)
def exdate(self, exdate):
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate).next)
for gen in [iter(x).next for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate).next)
for gen in [iter(x).next for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
total = 0
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exlist[0].next()
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
ritem.next()
rlist.sort()
self._len = total
class _rrulestr:
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError, "unknown parameter name"
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported EXDATE parm: "+parm
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
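if __name__ == "__main__":
    # Editor's illustrative sketch, not part of the original module: parse an
    # RFC 2445 recurrence string with the rrulestr instance defined above and
    # print its occurrences. The dtstart value is made up for the example.
    example_rule = rrulestr("FREQ=DAILY;COUNT=3",
                            dtstart=datetime.datetime(2011, 1, 1, 9, 0))
    for occurrence in example_rule:
        print occurrence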
# vim:ts=4:sw=4:et
|
gpl-2.0
|
sgraham/nope
|
testing/gmock/scripts/generator/cpp/utils.py
|
1158
|
1153
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = '[email protected] (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
"""Returns the contents of a file."""
try:
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
except IOError:
if print_error:
print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
return None
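if __name__ == '__main__':
    # Editor's illustrative sketch, not part of the original module: read this
    # source file with ReadFile and report how many bytes it contains.
    contents = ReadFile(__file__)
    if contents is not None:
        print('Read %d bytes from %s' % (len(contents), __file__))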
|
bsd-3-clause
|
azurestandard/django
|
tests/urls.py
|
91
|
1189
|
from django.conf.urls import patterns, include
urlpatterns = patterns('',
# test_client modeltest urls
(r'^test_client/', include('modeltests.test_client.urls')),
(r'^test_client_regress/', include('regressiontests.test_client_regress.urls')),
# File upload test views
(r'^file_uploads/', include('regressiontests.file_uploads.urls')),
# Always provide the auth system login and logout views
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
# test urlconf for {% url %} template tag
(r'^url_tag/', include('regressiontests.templates.urls')),
# django built-in views
(r'^views/', include('regressiontests.views.urls')),
# test urlconf for middleware tests
(r'^middleware/', include('regressiontests.middleware.urls')),
# admin widget tests
(r'widget_admin/', include('regressiontests.admin_widgets.urls')),
# admin custom URL tests
(r'^custom_urls/', include('regressiontests.admin_custom_urls.urls')),
# admin scripts tests
(r'^admin_scripts/', include('regressiontests.admin_scripts.urls')),
)
|
bsd-3-clause
|
eugenejen/AutobahnPython
|
autobahn/autobahn/util.py
|
3
|
4969
|
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ("utcnow",
"parseutc",
"utcstr",
"newid",
"rtime",
"Stopwatch",)
import datetime
import time
import random
import sys
UTC_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
def utcnow():
"""
Get current time in UTC as ISO 8601 string.
"""
now = datetime.datetime.utcnow()
return now.strftime(UTC_TIMESTAMP_FORMAT)
def parseutc(s):
"""
Parse an ISO 8601 combined date and time string, e.g. 2011-11-23T12:23:00Z,
into a UTC datetime instance.
"""
try:
return datetime.datetime.strptime(s, UTC_TIMESTAMP_FORMAT)
except:
return None
def utcstr(dt):
"""
Convert a UTC datetime instance into an ISO 8601 combined date and time
string, e.g. 2011-11-23T12:23:00Z.
"""
try:
return dt.strftime(UTC_TIMESTAMP_FORMAT)
except:
return None
def id():
"""
Generate a new random object ID.
"""
return random.randint(0, 9007199254740992)
def newid():
"""
Generate a new random object ID.
"""
return ''.join([random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_") for i in xrange(16)])
## Select the most precise walltime measurement function available
## on the platform
##
if sys.platform.startswith('win'):
## On Windows, this function returns wall-clock seconds elapsed since the
## first call to this function, as a floating point number, based on the
## Win32 function QueryPerformanceCounter(). The resolution is typically
## better than one microsecond
rtime = time.clock
_ = rtime()
else:
## On Unix-like platforms, this uses the first available from this list:
## (1) gettimeofday() -- resolution in microseconds
## (2) ftime() -- resolution in milliseconds
## (3) time() -- resolution in seconds
rtime = time.time
class Stopwatch:
"""
Stopwatch based on walltime. Can be used to do code timing and uses the
most precise walltime measurement available on the platform. This is
a very light-weight object, so create/dispose is very cheap.
"""
def __init__(self, start = True):
"""
Creates a new stopwatch and by default immediately starts (= resumes) it.
"""
self._elapsed = 0
if start:
self._started = rtime()
self._running = True
else:
self._started = None
self._running = False
def elapsed(self):
"""
Return total time elapsed in seconds during which the stopwatch was running.
"""
if self._running:
now = rtime()
return self._elapsed + (now - self._started)
else:
return self._elapsed
def pause(self):
"""
Pauses the stopwatch and returns total time elapsed in seconds during which
the stopwatch was running.
"""
if self._running:
now = rtime()
self._elapsed += now - self._started
self._running = False
return self._elapsed
else:
return self._elapsed
def resume(self):
"""
Resumes a paused stopwatch and returns total elapsed time in seconds
during which the stopwatch was running.
"""
if not self._running:
self._started = rtime()
self._running = True
return self._elapsed
else:
now = rtime()
return self._elapsed + (now - self._started)
def stop(self):
"""
Stops the stopwatch and returns total time elapsed in seconds during which
the stopwatch was (previously) running.
"""
elapsed = self.pause()
self._elapsed = 0
self._started = None
self._running = False
return elapsed
class EqualityMixin:
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# we only want the actual message data attributes (not eg _serialize)
for k in self.__dict__:
if not k.startswith('_'):
if not self.__dict__[k] == other.__dict__[k]:
return False
return True
#return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
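if __name__ == '__main__':
    ## Editor's illustrative sketch, not part of the original module: time a
    ## short sleep with the Stopwatch class defined above.
    sw = Stopwatch()
    time.sleep(0.25)
    print "slept for %.3f seconds of wall-clock time" % sw.stop()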
|
apache-2.0
|
yetu/repotools
|
scm.py
|
7
|
41039
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SCM-specific utility classes."""
import cStringIO
import glob
import logging
import os
import re
import sys
import tempfile
import time
from xml.etree import ElementTree
import gclient_utils
import subprocess2
def ValidateEmail(email):
return (re.match(r"^[a-zA-Z0-9._%-+]+@[a-zA-Z0-9._%-]+.[a-zA-Z]{2,6}$", email)
is not None)
def GetCasedPath(path):
"""Elcheapos way to get the real path case on Windows."""
if sys.platform.startswith('win') and os.path.exists(path):
# Reconstruct the path.
path = os.path.abspath(path)
paths = path.split('\\')
for i in range(len(paths)):
if i == 0:
# Skip drive letter.
continue
subpath = '\\'.join(paths[:i+1])
prev = len('\\'.join(paths[:i]))
# glob.glob will return the cased path for the last item only. This is why
# we are calling it in a loop. Extract the data we want and put it back
# into the list.
paths[i] = glob.glob(subpath + '*')[0][prev+1:len(subpath)]
path = '\\'.join(paths)
return path
def GenFakeDiff(filename):
"""Generates a fake diff from a file."""
file_content = gclient_utils.FileRead(filename, 'rb').splitlines(True)
filename = filename.replace(os.sep, '/')
nb_lines = len(file_content)
# We need to use / since patch on unix will fail otherwise.
data = cStringIO.StringIO()
data.write("Index: %s\n" % filename)
data.write('=' * 67 + '\n')
# Note: Should we use /dev/null instead?
data.write("--- %s\n" % filename)
data.write("+++ %s\n" % filename)
data.write("@@ -0,0 +1,%d @@\n" % nb_lines)
# Prepend '+' to every lines.
for line in file_content:
data.write('+')
data.write(line)
result = data.getvalue()
data.close()
return result
def determine_scm(root):
"""Similar to upload.py's version but much simpler.
Returns 'svn', 'git' or None.
"""
if os.path.isdir(os.path.join(root, '.svn')):
return 'svn'
elif os.path.isdir(os.path.join(root, '.git')):
return 'git'
else:
try:
subprocess2.check_call(
['git', 'rev-parse', '--show-cdup'],
stdout=subprocess2.VOID,
stderr=subprocess2.VOID,
cwd=root)
return 'git'
except (OSError, subprocess2.CalledProcessError):
return None
def only_int(val):
if val.isdigit():
return int(val)
else:
return 0
class GIT(object):
current_version = None
@staticmethod
def ApplyEnvVars(kwargs):
env = kwargs.pop('env', None) or os.environ.copy()
# Don't prompt for passwords; just fail quickly and noisily.
# By default, git will use an interactive terminal prompt when a username/
# password is needed. That shouldn't happen in the chromium workflow,
# and if it does, then gclient may hide the prompt in the midst of a flood
# of terminal spew. The only indication that something has gone wrong
# will be when gclient hangs unresponsively. Instead, we disable the
# password prompt and simply allow git to fail noisily. The error
# message produced by git will be copied to gclient's output.
env.setdefault('GIT_ASKPASS', 'true')
env.setdefault('SSH_ASKPASS', 'true')
# 'cat' is a magical git string that disables pagers on all platforms.
env.setdefault('GIT_PAGER', 'cat')
return env
@staticmethod
def Capture(args, cwd, strip_out=True, **kwargs):
env = GIT.ApplyEnvVars(kwargs)
output = subprocess2.check_output(
['git'] + args,
cwd=cwd, stderr=subprocess2.PIPE, env=env, **kwargs)
return output.strip() if strip_out else output
@staticmethod
def CaptureStatus(files, cwd, upstream_branch):
"""Returns git status.
@files can be a string (one file) or a list of files.
Returns an array of (status, file) tuples."""
if upstream_branch is None:
upstream_branch = GIT.GetUpstreamBranch(cwd)
if upstream_branch is None:
raise gclient_utils.Error('Cannot determine upstream branch')
command = ['diff', '--name-status', '--no-renames',
'-r', '%s...' % upstream_branch]
if not files:
pass
elif isinstance(files, basestring):
command.append(files)
else:
command.extend(files)
status = GIT.Capture(command, cwd)
results = []
if status:
for statusline in status.splitlines():
# 3-way merges can cause the status to be 'MMM' instead of 'M'. This
# can happen when the user has 2 local branches and diffs between
# these 2 branches instead of diffing against upstream.
m = re.match('^(\w)+\t(.+)$', statusline)
if not m:
raise gclient_utils.Error(
'status currently unsupported: %s' % statusline)
# Only grab the first letter.
results.append(('%s ' % m.group(1)[0], m.group(2)))
return results
@staticmethod
def IsWorkTreeDirty(cwd):
return GIT.Capture(['status', '-s'], cwd=cwd) != ''
@staticmethod
def GetEmail(cwd):
"""Retrieves the user email address if known."""
# We could want to look at the svn cred when it has a svn remote but it
# should be fine for now, users should simply configure their git settings.
try:
return GIT.Capture(['config', 'user.email'], cwd=cwd)
except subprocess2.CalledProcessError:
return ''
@staticmethod
def ShortBranchName(branch):
"""Converts a name like 'refs/heads/foo' to just 'foo'."""
return branch.replace('refs/heads/', '')
@staticmethod
def GetBranchRef(cwd):
"""Returns the full branch reference, e.g. 'refs/heads/master'."""
return GIT.Capture(['symbolic-ref', 'HEAD'], cwd=cwd)
@staticmethod
def GetBranch(cwd):
"""Returns the short branch name, e.g. 'master'."""
return GIT.ShortBranchName(GIT.GetBranchRef(cwd))
@staticmethod
def IsGitSvn(cwd):
"""Returns True if this repo looks like it's using git-svn."""
# If you have any "svn-remote.*" config keys, we think you're using svn.
try:
GIT.Capture(['config', '--local', '--get-regexp', r'^svn-remote\.'],
cwd=cwd)
return True
except subprocess2.CalledProcessError:
return False
@staticmethod
def MatchSvnGlob(url, base_url, glob_spec, allow_wildcards):
"""Return the corresponding git ref if |base_url| together with |glob_spec|
matches the full |url|.
If |allow_wildcards| is true, |glob_spec| can contain wildcards (see below).
"""
fetch_suburl, as_ref = glob_spec.split(':')
if allow_wildcards:
glob_match = re.match('(.+/)?(\*|{[^/]*})(/.+)?', fetch_suburl)
if glob_match:
# Parse specs like "branches/*/src:refs/remotes/svn/*" or
# "branches/{472,597,648}/src:refs/remotes/svn/*".
branch_re = re.escape(base_url)
if glob_match.group(1):
branch_re += '/' + re.escape(glob_match.group(1))
wildcard = glob_match.group(2)
if wildcard == '*':
branch_re += '([^/]*)'
else:
# Escape and replace surrounding braces with parentheses and commas
# with pipe symbols.
wildcard = re.escape(wildcard)
wildcard = re.sub('^\\\\{', '(', wildcard)
wildcard = re.sub('\\\\,', '|', wildcard)
wildcard = re.sub('\\\\}$', ')', wildcard)
branch_re += wildcard
if glob_match.group(3):
branch_re += re.escape(glob_match.group(3))
match = re.match(branch_re, url)
if match:
return re.sub('\*$', match.group(1), as_ref)
# Parse specs like "trunk/src:refs/remotes/origin/trunk".
if fetch_suburl:
full_url = base_url + '/' + fetch_suburl
else:
full_url = base_url
if full_url == url:
return as_ref
return None
@staticmethod
def GetSVNBranch(cwd):
"""Returns the svn branch name if found."""
# Try to figure out which remote branch we're based on.
# Strategy:
# 1) iterate through our branch history and find the svn URL.
# 2) find the svn-remote that fetches from the URL.
# regexp matching the git-svn line that contains the URL.
git_svn_re = re.compile(r'^\s*git-svn-id: (\S+)@', re.MULTILINE)
# We don't want to go through all of history, so read a line from the
# pipe at a time.
# The -100 is an arbitrary limit so we don't search forever.
cmd = ['git', 'log', '-100', '--pretty=medium']
proc = subprocess2.Popen(cmd, cwd=cwd, stdout=subprocess2.PIPE)
url = None
for line in proc.stdout:
match = git_svn_re.match(line)
if match:
url = match.group(1)
proc.stdout.close() # Cut pipe.
break
if url:
svn_remote_re = re.compile(r'^svn-remote\.([^.]+)\.url (.*)$')
remotes = GIT.Capture(
['config', '--local', '--get-regexp', r'^svn-remote\..*\.url'],
cwd=cwd).splitlines()
for remote in remotes:
match = svn_remote_re.match(remote)
if match:
remote = match.group(1)
base_url = match.group(2)
try:
fetch_spec = GIT.Capture(
['config', '--local', 'svn-remote.%s.fetch' % remote],
cwd=cwd)
branch = GIT.MatchSvnGlob(url, base_url, fetch_spec, False)
except subprocess2.CalledProcessError:
branch = None
if branch:
return branch
try:
branch_spec = GIT.Capture(
['config', '--local', 'svn-remote.%s.branches' % remote],
cwd=cwd)
branch = GIT.MatchSvnGlob(url, base_url, branch_spec, True)
except subprocess2.CalledProcessError:
branch = None
if branch:
return branch
try:
tag_spec = GIT.Capture(
['config', '--local', 'svn-remote.%s.tags' % remote],
cwd=cwd)
branch = GIT.MatchSvnGlob(url, base_url, tag_spec, True)
except subprocess2.CalledProcessError:
branch = None
if branch:
return branch
@staticmethod
def FetchUpstreamTuple(cwd):
"""Returns a tuple containing remote and remote ref,
e.g. 'origin', 'refs/heads/master'
Tries to be intelligent and understand git-svn.
"""
remote = '.'
branch = GIT.GetBranch(cwd)
try:
upstream_branch = GIT.Capture(
['config', '--local', 'branch.%s.merge' % branch], cwd=cwd)
except subprocess2.CalledProcessError:
upstream_branch = None
if upstream_branch:
try:
remote = GIT.Capture(
['config', '--local', 'branch.%s.remote' % branch], cwd=cwd)
except subprocess2.CalledProcessError:
pass
else:
try:
upstream_branch = GIT.Capture(
['config', '--local', 'rietveld.upstream-branch'], cwd=cwd)
except subprocess2.CalledProcessError:
upstream_branch = None
if upstream_branch:
try:
remote = GIT.Capture(
['config', '--local', 'rietveld.upstream-remote'], cwd=cwd)
except subprocess2.CalledProcessError:
pass
else:
# Fall back on trying a git-svn upstream branch.
if GIT.IsGitSvn(cwd):
upstream_branch = GIT.GetSVNBranch(cwd)
else:
# Else, try to guess the origin remote.
remote_branches = GIT.Capture(['branch', '-r'], cwd=cwd).split()
if 'origin/master' in remote_branches:
# Fall back on origin/master if it exists.
remote = 'origin'
upstream_branch = 'refs/heads/master'
elif 'origin/trunk' in remote_branches:
# Fall back on origin/trunk if it exists. Generally a shared
# git-svn clone
remote = 'origin'
upstream_branch = 'refs/heads/trunk'
else:
# Give up.
remote = None
upstream_branch = None
return remote, upstream_branch
@staticmethod
def GetUpstreamBranch(cwd):
"""Gets the current branch's upstream branch."""
remote, upstream_branch = GIT.FetchUpstreamTuple(cwd)
if remote != '.' and upstream_branch:
upstream_branch = upstream_branch.replace('heads', 'remotes/' + remote)
return upstream_branch
@staticmethod
def GenerateDiff(cwd, branch=None, branch_head='HEAD', full_move=False,
files=None):
"""Diffs against the upstream branch or optionally another branch.
full_move means that move or copy operations should completely recreate the
files, usually with the intent of applying the patch for a try job."""
if not branch:
branch = GIT.GetUpstreamBranch(cwd)
command = ['diff', '-p', '--no-color', '--no-prefix', '--no-ext-diff',
branch + "..." + branch_head]
if full_move:
command.append('--no-renames')
else:
command.append('-C')
# TODO(maruel): --binary support.
if files:
command.append('--')
command.extend(files)
diff = GIT.Capture(command, cwd=cwd, strip_out=False).splitlines(True)
for i in range(len(diff)):
# In the case of added files, replace /dev/null with the path to the
# file being added.
if diff[i].startswith('--- /dev/null'):
diff[i] = '--- %s' % diff[i+1][4:]
return ''.join(diff)
@staticmethod
def GetDifferentFiles(cwd, branch=None, branch_head='HEAD'):
"""Returns the list of modified files between two branches."""
if not branch:
branch = GIT.GetUpstreamBranch(cwd)
command = ['diff', '--name-only', branch + "..." + branch_head]
return GIT.Capture(command, cwd=cwd).splitlines(False)
@staticmethod
def GetPatchName(cwd):
"""Constructs a name for this patch."""
short_sha = GIT.Capture(['rev-parse', '--short=4', 'HEAD'], cwd=cwd)
return "%s#%s" % (GIT.GetBranch(cwd), short_sha)
@staticmethod
def GetCheckoutRoot(cwd):
"""Returns the top level directory of a git checkout as an absolute path.
"""
root = GIT.Capture(['rev-parse', '--show-cdup'], cwd=cwd)
return os.path.abspath(os.path.join(cwd, root))
@staticmethod
def GetGitDir(cwd):
return os.path.abspath(GIT.Capture(['rev-parse', '--git-dir'], cwd=cwd))
@staticmethod
def IsInsideWorkTree(cwd):
try:
return GIT.Capture(['rev-parse', '--is-inside-work-tree'], cwd=cwd)
except (OSError, subprocess2.CalledProcessError):
return False
@staticmethod
def GetGitSvnHeadRev(cwd):
"""Gets the most recently pulled git-svn revision."""
try:
output = GIT.Capture(['svn', 'info'], cwd=cwd)
match = re.search(r'^Revision: ([0-9]+)$', output, re.MULTILINE)
return int(match.group(1)) if match else None
except (subprocess2.CalledProcessError, ValueError):
return None
@staticmethod
def ParseGitSvnSha1(output):
"""Parses git-svn output for the first sha1."""
match = re.search(r'[0-9a-fA-F]{40}', output)
return match.group(0) if match else None
@staticmethod
def GetSha1ForSvnRev(cwd, rev):
"""Returns a corresponding git sha1 for a SVN revision."""
if not GIT.IsGitSvn(cwd=cwd):
return None
try:
output = GIT.Capture(['svn', 'find-rev', 'r' + str(rev)], cwd=cwd)
return GIT.ParseGitSvnSha1(output)
except subprocess2.CalledProcessError:
return None
@staticmethod
def GetBlessedSha1ForSvnRev(cwd, rev):
"""Returns a git commit hash from the master branch history that has
accurate .DEPS.git and git submodules. To understand why this is more
complicated than a simple call to `git svn find-rev`, refer to:
http://www.chromium.org/developers/how-tos/git-repo
"""
git_svn_rev = GIT.GetSha1ForSvnRev(cwd, rev)
if not git_svn_rev:
return None
try:
output = GIT.Capture(
['rev-list', '--ancestry-path', '--reverse',
'--grep', 'SVN changes up to revision [0-9]*',
'%s..refs/remotes/origin/master' % git_svn_rev], cwd=cwd)
if not output:
return None
sha1 = output.splitlines()[0]
if not sha1:
return None
output = GIT.Capture(['rev-list', '-n', '1', '%s^1' % sha1], cwd=cwd)
if git_svn_rev != output.rstrip():
raise gclient_utils.Error(sha1)
return sha1
except subprocess2.CalledProcessError:
return None
@staticmethod
def IsValidRevision(cwd, rev, sha_only=False):
"""Verifies the revision is a proper git revision.
sha_only: Fail unless rev is a sha hash.
"""
# 'git rev-parse foo' where foo is *any* 40 character hex string will return
# the string and return code 0. So strip one character to force 'git
# rev-parse' to do a hash table look-up and return 128 if the hash is not
# present.
lookup_rev = rev
if re.match(r'^[0-9a-fA-F]{40}$', rev):
lookup_rev = rev[:-1]
try:
sha = GIT.Capture(['rev-parse', lookup_rev], cwd=cwd).lower()
if lookup_rev != rev:
# Make sure we get the original 40 chars back.
return rev.lower() == sha
if sha_only:
return sha.startswith(rev.lower())
return True
except subprocess2.CalledProcessError:
return False
@classmethod
def AssertVersion(cls, min_version):
"""Asserts git's version is at least min_version."""
if cls.current_version is None:
current_version = cls.Capture(['--version'], '.')
matched = re.search(r'version ([0-9\.]+)', current_version)
cls.current_version = matched.group(1)
current_version_list = map(only_int, cls.current_version.split('.'))
for min_ver in map(int, min_version.split('.')):
ver = current_version_list.pop(0)
if ver < min_ver:
return (False, cls.current_version)
elif ver > min_ver:
return (True, cls.current_version)
return (True, cls.current_version)
class SVN(object):
current_version = None
@staticmethod
def Capture(args, cwd, **kwargs):
"""Always redirect stderr.
Throws an exception if non-0 is returned.
"""
return subprocess2.check_output(
['svn'] + args, stderr=subprocess2.PIPE, cwd=cwd, **kwargs)
@staticmethod
def RunAndGetFileList(verbose, args, cwd, file_list, stdout=None):
"""Runs svn checkout, update, or status, output to stdout.
The first item in args must be either "checkout", "update", or "status".
svn's stdout is parsed to collect a list of files checked out or updated.
These files are appended to file_list. svn's stdout is also printed to
sys.stdout as in Run.
Args:
verbose: If True, uses verbose output
args: A sequence of command line parameters to be passed to svn.
cwd: The directory where svn is to be run.
Raises:
Error: An error occurred while running the svn command.
"""
stdout = stdout or sys.stdout
if file_list is None:
# Even if our caller doesn't care about file_list, we use it internally.
file_list = []
# svn update and svn checkout use the same pattern: the first three columns
# are for file status, property status, and lock status. This is followed
# by two spaces, and then the path to the file.
update_pattern = '^... (.*)$'
# The first three columns of svn status are the same as for svn update and
# svn checkout. The next three columns indicate addition-with-history,
# switch, and remote lock status. This is followed by one space, and then
# the path to the file.
status_pattern = '^...... (.*)$'
# args[0] must be a supported command. This will blow up if it's something
# else, which is good. Note that the patterns are only effective when
# these commands are used in their ordinary forms, the patterns are invalid
# for "svn status --show-updates", for example.
pattern = {
'checkout': update_pattern,
'status': status_pattern,
'update': update_pattern,
}[args[0]]
compiled_pattern = re.compile(pattern)
# Place an upper limit.
backoff_time = 5
retries = 0
while True:
retries += 1
previous_list_len = len(file_list)
failure = []
def CaptureMatchingLines(line):
match = compiled_pattern.search(line)
if match:
file_list.append(match.group(1))
if line.startswith('svn: '):
failure.append(line)
try:
gclient_utils.CheckCallAndFilterAndHeader(
['svn'] + args,
cwd=cwd,
always=verbose,
filter_fn=CaptureMatchingLines,
stdout=stdout)
except subprocess2.CalledProcessError:
def IsKnownFailure():
for x in failure:
if (x.startswith('svn: OPTIONS of') or
x.startswith('svn: PROPFIND of') or
x.startswith('svn: REPORT of') or
x.startswith('svn: Unknown hostname') or
x.startswith('svn: Server sent unexpected return value') or
x.startswith('svn: Can\'t connect to host')):
return True
return False
# Subversion client is really misbehaving with Google Code.
if args[0] == 'checkout':
# Ensure at least one file was checked out, otherwise *delete* the
# directory.
if len(file_list) == previous_list_len:
if not IsKnownFailure():
# No known svn error was found, bail out.
raise
# No files were checked out, so make sure the directory is
# deleted in case it's messed up and try again.
# Warning: It's bad, it assumes args[2] is the directory
# argument.
if os.path.isdir(args[2]):
gclient_utils.rmtree(args[2])
else:
# Progress was made, convert to update since an aborted checkout
# is now an update.
args = ['update'] + args[1:]
else:
# It was an update or export.
# We enforce that some progress has been made or a known failure.
if len(file_list) == previous_list_len and not IsKnownFailure():
# No known svn error was found and no progress, bail out.
raise
if retries == 10:
raise
print "Sleeping %.1f seconds and retrying...." % backoff_time
time.sleep(backoff_time)
backoff_time *= 1.3
continue
break
@staticmethod
def CaptureRemoteInfo(url):
"""Returns a dictionary from the svn info output for the given url.
Throws an exception if svn info fails.
"""
assert isinstance(url, str)
return SVN._CaptureInfo([url], None)
@staticmethod
def CaptureLocalInfo(files, cwd):
"""Returns a dictionary from the svn info output for the given files.
Throws an exception if svn info fails.
"""
assert isinstance(files, (list, tuple))
return SVN._CaptureInfo(files, cwd)
@staticmethod
def _CaptureInfo(files, cwd):
"""Returns a dictionary from the svn info output for the given file.
Throws an exception if svn info fails."""
result = {}
info = ElementTree.XML(SVN.Capture(['info', '--xml'] + files, cwd))
if info is None:
return result
entry = info.find('entry')
if entry is None:
return result
# Use .text when the item is not optional.
result['Path'] = entry.attrib['path']
rev = entry.attrib['revision']
try:
result['Revision'] = int(rev)
except ValueError:
result['Revision'] = None
result['Node Kind'] = entry.attrib['kind']
# Differs across versions.
if result['Node Kind'] == 'dir':
result['Node Kind'] = 'directory'
result['URL'] = entry.find('url').text
repository = entry.find('repository')
result['Repository Root'] = repository.find('root').text
result['UUID'] = repository.find('uuid')
wc_info = entry.find('wc-info')
if wc_info is not None:
result['Schedule'] = wc_info.find('schedule').text
result['Copied From URL'] = wc_info.find('copy-from-url')
result['Copied From Rev'] = wc_info.find('copy-from-rev')
else:
result['Schedule'] = None
result['Copied From URL'] = None
result['Copied From Rev'] = None
for key in result.keys():
if isinstance(result[key], unicode):
# Unicode results interfere with the higher layers matching up things
# in the deps dictionary.
result[key] = result[key].encode()
# Automatic conversion of optional parameters.
result[key] = getattr(result[key], 'text', result[key])
return result
@staticmethod
def CaptureRevision(cwd):
"""Get the base revision of a SVN repository.
Returns:
Int base revision
"""
return SVN.CaptureLocalInfo([], cwd).get('Revision')
@staticmethod
def CaptureStatus(files, cwd, no_ignore=False):
"""Returns the svn 1.5 svn status emulated output.
@files can be a string (one file) or a list of files.
Returns an array of (status, file) tuples."""
command = ["status", "--xml"]
if no_ignore:
command.append('--no-ignore')
if not files:
pass
elif isinstance(files, basestring):
command.append(files)
else:
command.extend(files)
status_letter = {
None: ' ',
'': ' ',
'added': 'A',
'conflicted': 'C',
'deleted': 'D',
'external': 'X',
'ignored': 'I',
'incomplete': '!',
'merged': 'G',
'missing': '!',
'modified': 'M',
'none': ' ',
'normal': ' ',
'obstructed': '~',
'replaced': 'R',
'unversioned': '?',
}
dom = ElementTree.XML(SVN.Capture(command, cwd))
results = []
if dom is None:
return results
# /status/target/entry/(wc-status|commit|author|date)
for target in dom.findall('target'):
for entry in target.findall('entry'):
file_path = entry.attrib['path']
wc_status = entry.find('wc-status')
# Emulate svn 1.5 status output...
statuses = [' '] * 7
# Col 0
xml_item_status = wc_status.attrib['item']
if xml_item_status in status_letter:
statuses[0] = status_letter[xml_item_status]
else:
raise gclient_utils.Error(
'Unknown item status "%s"; please implement me!' %
xml_item_status)
# Col 1
xml_props_status = wc_status.attrib['props']
if xml_props_status == 'modified':
statuses[1] = 'M'
elif xml_props_status == 'conflicted':
statuses[1] = 'C'
elif (not xml_props_status or xml_props_status == 'none' or
xml_props_status == 'normal'):
pass
else:
raise gclient_utils.Error(
'Unknown props status "%s"; please implement me!' %
xml_props_status)
# Col 2
if wc_status.attrib.get('wc-locked') == 'true':
statuses[2] = 'L'
# Col 3
if wc_status.attrib.get('copied') == 'true':
statuses[3] = '+'
# Col 4
if wc_status.attrib.get('switched') == 'true':
statuses[4] = 'S'
# TODO(maruel): Col 5 and 6
item = (''.join(statuses), file_path)
results.append(item)
return results
@staticmethod
def IsMoved(filename, cwd):
"""Determine if a file has been added through svn mv"""
assert isinstance(filename, basestring)
return SVN.IsMovedInfo(SVN.CaptureLocalInfo([filename], cwd))
@staticmethod
def IsMovedInfo(info):
"""Determine if a file has been added through svn mv"""
return (info.get('Copied From URL') and
info.get('Copied From Rev') and
info.get('Schedule') == 'add')
@staticmethod
def GetFileProperty(filename, property_name, cwd):
"""Returns the value of an SVN property for the given file.
Args:
filename: The file to check
property_name: The name of the SVN property, e.g. "svn:mime-type"
Returns:
The value of the property, which will be the empty string if the property
is not set on the file. If the file is not under version control, the
empty string is also returned.
"""
try:
return SVN.Capture(['propget', property_name, filename], cwd)
except subprocess2.CalledProcessError:
return ''
@staticmethod
def GenerateDiff(filenames, cwd, full_move, revision):
"""Returns a string containing the diff for the given file list.
The files in the list should either be absolute paths or relative to the
given root. If no root directory is provided, the repository root will be
used.
The diff will always use relative paths.
"""
assert isinstance(filenames, (list, tuple))
# If the user specified a custom diff command in their svn config file,
# then it'll be used when we do svn diff, which we don't want to happen
# since we want the unified diff.
if SVN.AssertVersion("1.7")[0]:
# On svn >= 1.7, the "--internal-diff" flag will solve this.
return SVN._GenerateDiffInternal(filenames, cwd, full_move, revision,
["diff", "--internal-diff"],
["diff", "--internal-diff"])
else:
# On svn < 1.7, the "--internal-diff" flag doesn't exist. Using
# --diff-cmd=diff doesn't always work, since e.g. Windows cmd users may
# not have a "diff" executable in their path at all. So we use an empty
# temporary directory as the config directory, which bypasses any user
# settings for the diff-cmd. However, we don't pass this for the
# remote_safe_diff_command parameter, since when a new config-dir is
# specified for an svn diff against a remote URL, it triggers
# authentication prompts. In this case there isn't really a good
# alternative to svn 1.7's --internal-diff flag.
bogus_dir = tempfile.mkdtemp()
try:
return SVN._GenerateDiffInternal(filenames, cwd, full_move, revision,
["diff", "--config-dir", bogus_dir],
["diff"])
finally:
gclient_utils.rmtree(bogus_dir)
@staticmethod
def _GenerateDiffInternal(filenames, cwd, full_move, revision, diff_command,
remote_safe_diff_command):
root = os.path.normcase(os.path.join(cwd, ''))
def RelativePath(path, root):
"""We must use relative paths."""
if os.path.normcase(path).startswith(root):
return path[len(root):]
return path
# Cleanup filenames
filenames = [RelativePath(f, root) for f in filenames]
# Get information about the modified items (files and directories)
data = dict((f, SVN.CaptureLocalInfo([f], root)) for f in filenames)
diffs = []
if full_move:
# Eliminate modified files inside moved/copied directory.
for (filename, info) in data.iteritems():
if SVN.IsMovedInfo(info) and info.get("Node Kind") == "directory":
# Remove files inside the directory.
filenames = [f for f in filenames
if not f.startswith(filename + os.path.sep)]
for filename in data.keys():
if not filename in filenames:
# Remove filtered out items.
del data[filename]
else:
metaheaders = []
for (filename, info) in data.iteritems():
if SVN.IsMovedInfo(info):
# for now, the most common case is a head copy,
# so let's just encode that as a straight up cp.
srcurl = info.get('Copied From URL')
file_root = info.get('Repository Root')
rev = int(info.get('Copied From Rev'))
assert srcurl.startswith(file_root)
src = srcurl[len(file_root)+1:]
try:
srcinfo = SVN.CaptureRemoteInfo(srcurl)
except subprocess2.CalledProcessError, e:
if not 'Not a valid URL' in e.stderr:
raise
# Assume the file was deleted. No idea how to figure out at which
# revision the file was deleted.
srcinfo = {'Revision': rev}
if (srcinfo.get('Revision') != rev and
SVN.Capture(remote_safe_diff_command + ['-r', '%d:head' % rev,
srcurl], cwd)):
metaheaders.append("#$ svn cp -r %d %s %s "
"### WARNING: note non-trunk copy\n" %
(rev, src, filename))
else:
metaheaders.append("#$ cp %s %s\n" % (src,
filename))
if metaheaders:
diffs.append("### BEGIN SVN COPY METADATA\n")
diffs.extend(metaheaders)
diffs.append("### END SVN COPY METADATA\n")
# Now ready to do the actual diff.
for filename in sorted(data):
diffs.append(SVN._DiffItemInternal(
filename, cwd, data[filename], diff_command, full_move, revision))
# Use StringIO since it can be messy when diffing a directory move with
# full_move=True.
buf = cStringIO.StringIO()
for d in filter(None, diffs):
buf.write(d)
result = buf.getvalue()
buf.close()
return result
@staticmethod
def _DiffItemInternal(filename, cwd, info, diff_command, full_move, revision):
"""Grabs the diff data."""
command = diff_command + [filename]
if revision:
command.extend(['--revision', revision])
data = None
if SVN.IsMovedInfo(info):
if full_move:
if info.get("Node Kind") == "directory":
# Things become tricky here. It's a directory copy/move. We need to
# diff all the files inside it.
# This will put a lot of pressure on the heap. This is why StringIO
# is used and converted back into a string at the end. The reason to
# return a string instead of a StringIO is that StringIO.write()
# doesn't accept a StringIO object. *sigh*.
for (dirpath, dirnames, filenames) in os.walk(filename):
# Cleanup all files starting with a '.'.
# Iterate over copies so that removing entries does not skip items.
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d)
for f in filenames[:]:
if f.startswith('.'):
filenames.remove(f)
for f in filenames:
if data is None:
data = cStringIO.StringIO()
data.write(GenFakeDiff(os.path.join(dirpath, f)))
if data:
tmp = data.getvalue()
data.close()
data = tmp
else:
data = GenFakeDiff(filename)
else:
if info.get("Node Kind") != "directory":
# svn diff on a mv/cp'd file outputs nothing if there was no change.
data = SVN.Capture(command, cwd)
if not data:
# We put in an empty Index entry so upload.py knows about them.
data = "Index: %s\n" % filename.replace(os.sep, '/')
# Otherwise silently ignore directories.
else:
if info.get("Node Kind") != "directory":
# Normal simple case.
try:
data = SVN.Capture(command, cwd)
except subprocess2.CalledProcessError:
if revision:
data = GenFakeDiff(filename)
else:
raise
# Otherwise silently ignore directories.
return data
@staticmethod
def GetEmail(cwd):
"""Retrieves the svn account which we assume is an email address."""
try:
infos = SVN.CaptureLocalInfo([], cwd)
except subprocess2.CalledProcessError:
return None
# Should check for uuid but it is incorrectly saved for https creds.
root = infos['Repository Root']
realm = root.rsplit('/', 1)[0]
uuid = infos['UUID']
if root.startswith('https') or not uuid:
regexp = re.compile(r'<%s:\d+>.*' % realm)
else:
regexp = re.compile(r'<%s:\d+> %s' % (realm, uuid))
if regexp is None:
return None
if sys.platform.startswith('win'):
if not 'APPDATA' in os.environ:
return None
auth_dir = os.path.join(os.environ['APPDATA'], 'Subversion', 'auth',
'svn.simple')
else:
if not 'HOME' in os.environ:
return None
auth_dir = os.path.join(os.environ['HOME'], '.subversion', 'auth',
'svn.simple')
for credfile in os.listdir(auth_dir):
cred_info = SVN.ReadSimpleAuth(os.path.join(auth_dir, credfile))
if regexp.match(cred_info.get('svn:realmstring')):
return cred_info.get('username')
@staticmethod
def ReadSimpleAuth(filename):
f = open(filename, 'r')
values = {}
def ReadOneItem(item_type):
m = re.match(r'%s (\d+)' % item_type, f.readline())
if not m:
return None
data = f.read(int(m.group(1)))
if f.read(1) != '\n':
return None
return data
while True:
key = ReadOneItem('K')
if not key:
break
value = ReadOneItem('V')
if not value:
break
values[key] = value
return values
@staticmethod
def GetCheckoutRoot(cwd):
"""Returns the top level directory of the current repository.
The directory is returned as an absolute path.
"""
cwd = os.path.abspath(cwd)
try:
info = SVN.CaptureLocalInfo([], cwd)
cur_dir_repo_root = info['Repository Root']
url = info['URL']
except subprocess2.CalledProcessError:
return None
while True:
parent = os.path.dirname(cwd)
try:
info = SVN.CaptureLocalInfo([], parent)
if (info['Repository Root'] != cur_dir_repo_root or
info['URL'] != os.path.dirname(url)):
break
url = info['URL']
except subprocess2.CalledProcessError:
break
cwd = parent
return GetCasedPath(cwd)
@staticmethod
def IsValidRevision(url):
"""Verifies the revision looks like an SVN revision."""
try:
SVN.Capture(['info', url], cwd=None)
return True
except subprocess2.CalledProcessError:
return False
@classmethod
def AssertVersion(cls, min_version):
"""Asserts svn's version is at least min_version."""
if cls.current_version is None:
cls.current_version = cls.Capture(['--version', '--quiet'], None)
current_version_list = map(only_int, cls.current_version.split('.'))
for min_ver in map(int, min_version.split('.')):
ver = current_version_list.pop(0)
if ver < min_ver:
return (False, cls.current_version)
elif ver > min_ver:
return (True, cls.current_version)
return (True, cls.current_version)
@staticmethod
def Revert(cwd, callback=None, ignore_externals=False, no_ignore=False):
"""Reverts all svn modifications in cwd, including properties.
Deletes any modified files or directories.
A "svn update --revision BASE" call is required afterwards to revive deleted
files.
"""
for file_status in SVN.CaptureStatus(None, cwd, no_ignore=no_ignore):
file_path = os.path.join(cwd, file_status[1])
if (ignore_externals and
file_status[0][0] == 'X' and
file_status[0][1:].isspace()):
# Ignore externals.
logging.info('Ignoring external %s' % file_status[1])
continue
# This is the case where '! L .' is returned by 'svn status'. Just
# strip off the '/.'.
if file_path.endswith(os.path.sep + '.'):
file_path = file_path[:-2]
if callback:
callback(file_status)
if os.path.exists(file_path):
# svn revert is really stupid. It fails on inconsistent line-endings,
# on switched directories, etc. So take no chance and delete everything!
# In theory, it wouldn't be necessary for property-only change but then
# it'd have to look for switched directories, etc so it's not worth
# optimizing this use case.
if os.path.isfile(file_path) or os.path.islink(file_path):
logging.info('os.remove(%s)' % file_path)
os.remove(file_path)
elif os.path.isdir(file_path):
logging.info('rmtree(%s)' % file_path)
gclient_utils.rmtree(file_path)
else:
logging.critical(
('No idea what is %s.\nYou just found a bug in gclient'
', please ping [email protected] ASAP!') % file_path)
if (file_status[0][0] in ('D', 'A', '!') or
not file_status[0][1:].isspace()):
# Added or deleted files require manual intervention and require calling
# revert, as for properties.
if not os.path.isdir(cwd):
# '.' was deleted. It's not worth continuing.
return
try:
SVN.Capture(['revert', file_status[1]], cwd=cwd)
except subprocess2.CalledProcessError:
if not os.path.exists(file_path):
continue
raise
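if __name__ == '__main__':
    # Editor's illustrative sketch, not part of the original module: print the
    # current branch and checkout root of the git repository containing the
    # working directory. Assumes git is installed and '.' is inside a checkout.
    print 'branch:        %s' % GIT.GetBranch('.')
    print 'checkout root: %s' % GIT.GetCheckoutRoot('.')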
|
bsd-3-clause
|
gae-init/gae-init-docs
|
main/cache.py
|
25
|
1092
|
# coding: utf-8
from google.appengine.api import memcache
import flask
import config
###############################################################################
# Helpers
###############################################################################
def bump_counter(key, time=3600, limit=4):
client = memcache.Client()
for _ in range(limit):
counter = client.gets(key)
if counter is None:
client.set(key, 0, time=time)
counter = 0
if client.cas(key, counter + 1):
break
###############################################################################
# Auth Attempts stuff
###############################################################################
def get_auth_attempt_key():
return 'auth_attempt_%s' % flask.request.remote_addr
def reset_auth_attempt():
client = memcache.Client()
client.set(get_auth_attempt_key(), 0, time=3600)
def get_auth_attempt():
client = memcache.Client()
return client.get(get_auth_attempt_key()) or 0
def bump_auth_attempt():
bump_counter(get_auth_attempt_key(), limit=config.SIGNIN_RETRY_LIMIT)
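# Editor's illustrative sketch, not part of the original module: inside a
# request handler the helpers above can be combined to rate-limit sign-in
# attempts, roughly like this (SIGNIN_RETRY_LIMIT comes from config):
#
#   if get_auth_attempt() >= config.SIGNIN_RETRY_LIMIT:
#     flask.abort(403)
#   if credentials_are_valid:   # hypothetical flag from the sign-in check
#     reset_auth_attempt()
#   else:
#     bump_auth_attempt()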
|
mit
|
gaddman/ansible
|
lib/ansible/utils/module_docs_fragments/lxca_common.py
|
4
|
2411
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their
# own license to the complete work.
#
# Copyright (C) 2017 Lenovo, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
class ModuleDocFragment(object):
# Standard Pylxca documentation fragment
DOCUMENTATION = '''
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
options:
login_user:
description:
The username for use in HTTP basic authentication.
required: true
login_password:
description:
The password for use in HTTP basic authentication.
required: true
auth_url:
description:
Full https web address of the LXCA (Lenovo XClarity Administrator) instance.
required: true
requirements:
- pylxca
notes:
- Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
- Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
- Check mode is not supported.
'''
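# Editor's illustrative note, not part of the original file: an LXCA module
# reuses this fragment by declaring, inside its own DOCUMENTATION string,
#
#   extends_documentation_fragment:
#     - lxca_common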
|
gpl-3.0
|
varunr047/homefile
|
homeassistant/components/switch/digital_ocean.py
|
16
|
3005
|
"""
Support for interacting with Digital Ocean droplets.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.digital_ocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.components.digital_ocean import (
CONF_DROPLETS, ATTR_CREATED_AT, ATTR_DROPLET_ID, ATTR_DROPLET_NAME,
ATTR_FEATURES, ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY,
ATTR_REGION, ATTR_VCPUS)
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['digital_ocean']
DEFAULT_NAME = 'Droplet'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DROPLETS): vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Digital Ocean droplet switch."""
digital_ocean = get_component('digital_ocean')
droplets = config.get(CONF_DROPLETS)
dev = []
for droplet in droplets:
droplet_id = digital_ocean.DIGITAL_OCEAN.get_droplet_id(droplet)
dev.append(DigitalOceanSwitch(
digital_ocean.DIGITAL_OCEAN, droplet_id))
add_devices(dev)
class DigitalOceanSwitch(SwitchDevice):
"""Representation of a Digital Ocean droplet switch."""
def __init__(self, do, droplet_id):
"""Initialize a new Digital Ocean switch."""
self._digital_ocean = do
self._droplet_id = droplet_id
self.data = None
self._state = None
self.update()
@property
def name(self):
"""Return the name of the switch."""
return self.data.name
@property
def is_on(self):
"""Return true if switch is on."""
return self.data.status == 'active'
@property
def state_attributes(self):
"""Return the state attributes of the Digital Ocean droplet."""
return {
ATTR_CREATED_AT: self.data.created_at,
ATTR_DROPLET_ID: self.data.id,
ATTR_DROPLET_NAME: self.data.name,
ATTR_FEATURES: self.data.features,
ATTR_IPV4_ADDRESS: self.data.ip_address,
ATTR_IPV6_ADDRESS: self.data.ip_v6_address,
ATTR_MEMORY: self.data.memory,
ATTR_REGION: self.data.region['name'],
ATTR_VCPUS: self.data.vcpus,
}
def turn_on(self, **kwargs):
"""Boot-up the droplet."""
if self.data.status != 'active':
self.data.power_on()
def turn_off(self, **kwargs):
"""Shutdown the droplet."""
if self.data.status == 'active':
self.data.power_off()
def update(self):
"""Get the latest data from the device and update the data."""
self._digital_ocean.update()
for droplet in self._digital_ocean.data:
if droplet.id == self._droplet_id:
self.data = droplet
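# Editor's illustrative sketch, not part of the original module: based on the
# PLATFORM_SCHEMA above, a minimal configuration.yaml entry for this platform
# might look like
#
#   switch:
#     - platform: digital_ocean
#       droplets:
#         - 'my-droplet-name'    # hypothetical droplet name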
|
mit
|
alfa-addon/addon
|
mediaserver/lib/html5lib/treeadapters/sax.py
|
1835
|
1661
|
from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
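# Editor's illustrative sketch, not part of the original module: to_sax is
# normally driven by an html5lib treewalker together with any xml.sax content
# handler, roughly like this:
#
#   import html5lib
#   from xml.sax.handler import ContentHandler
#   tree = html5lib.parse("<p>hello</p>")
#   walker = html5lib.getTreeWalker("etree")
#   to_sax(walker(tree), ContentHandler())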
|
gpl-3.0
|
aexeagmbh/django-allauth
|
allauth/socialaccount/providers/foursquare/views.py
|
71
|
1366
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import FoursquareProvider
class FoursquareOAuth2Adapter(OAuth2Adapter):
provider_id = FoursquareProvider.id
access_token_url = 'https://foursquare.com/oauth2/access_token'
# Issue ?? -- this one authenticates over and over again...
# authorize_url = 'https://foursquare.com/oauth2/authorize'
authorize_url = 'https://foursquare.com/oauth2/authenticate'
profile_url = 'https://api.foursquare.com/v2/users/self'
def complete_login(self, request, app, token, **kwargs):
# Foursquare needs a version number for their API requests as documented here https://developer.foursquare.com/overview/versioning
resp = requests.get(self.profile_url,
params={'oauth_token': token.token, 'v': '20140116'})
extra_data = resp.json()['response']['user']
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(FoursquareOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FoursquareOAuth2Adapter)
|
mit
|
vaas-krish/openthread
|
tools/harness-automation/autothreadharness/open_thread_controller.py
|
16
|
10545
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import re
import socket
import threading
import time
import serial
from . import settings
__all__ = ['OpenThreadController']
logger = logging.getLogger(__name__)
linesepx = re.compile(r'\r\n|\n')
class OpenThreadController(threading.Thread):
"""This is an simple wrapper to communicate with openthread"""
_lock = threading.Lock()
viewing = False
def __init__(self, port, log=False):
"""Initialize the controller
Args:
port (str): serial port's path or name(windows)
"""
super(OpenThreadController, self).__init__()
self.port = port
self.handle = None
self.lines = []
self._log = log
self._is_net = False
self._init()
def _init(self):
self._connect()
if not self._log:
return
self.start()
def __del__(self):
self.close()
def close(self):
if self.is_alive():
self.viewing = False
self.join()
self._close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _close(self):
if self.handle:
self.handle.close()
self.handle = None
def _connect(self):
logger.debug('My port is %s', self.port)
if self.port.startswith('NET'):
portnum = settings.SER2NET_PORTBASE + int(self.port.split('NET')[1])
logger.debug('My port num is %d', portnum)
address = (settings.SER2NET_HOSTNAME, portnum)
self.handle = socket.create_connection(address)
self.handle.setblocking(0)
self._is_net = True
elif ':' in self.port:
host, port = self.port.split(':')
self.handle = socket.create_connection((host, port))
self.handle.setblocking(0)
self._is_net = True
else:
self.handle = serial.Serial(self.port, 115200, timeout=0, xonxoff=True)
self._is_net = False
def _read(self, size=512):
if self._is_net:
return self.handle.recv(size)
else:
return self.handle.read(size)
def _write(self, data):
if self._is_net:
self.handle.sendall(data)
else:
self.handle.write(data)
def _expect(self, expected, times=50):
"""Find the `expected` line within `times` trials.
Args:
expected str: the expected string
times int: number of trials
"""
logger.debug('[%s] Expecting [%s]', self.port, expected)
retry_times = 10
while times:
if not retry_times:
break
line = self._readline()
if line == expected:
return
if not line:
retry_times -= 1
time.sleep(0.1)
times -= 1
raise Exception('failed to find expected string[%s]' % expected)
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0)
def _sendline(self, line):
"""Send exactly one line to the device
Args:
line str: data send to device
"""
self.lines = []
try:
self._read()
except socket.error:
logging.debug('Nothing cleared')
logger.debug('sending [%s]', line)
self._write(line + '\r\n')
# wait for write to complete
time.sleep(0.5)
def _req(self, req):
"""Send command and wait for response.
        The command will be repeated at most 3 times in case of data loss on the serial port.
Args:
req (str): Command to send, please do not include new line in the end.
Returns:
[str]: The output lines
"""
logger.debug('DUT> %s', req)
self._log and self.pause()
times = 3
res = None
while times:
times = times - 1
try:
self._sendline(req)
self._expect(req)
line = None
res = []
while True:
line = self._readline()
logger.debug('Got line %s', line)
if line == 'Done':
break
if line:
res.append(line)
break
except:
logger.exception('Failed to send command')
self.close()
self._init()
self._log and self.resume()
return res
def run(self):
"""Threading callback"""
self.viewing = True
while self.viewing and self._lock.acquire():
try:
line = self._readline()
except:
pass
else:
logger.info(line)
self._lock.release()
time.sleep(0)
def is_started(self):
"""check if openthread is started
Returns:
bool: started or not
"""
state = self._req('state')[0]
return state != 'disabled'
def start(self):
"""Start openthread
"""
self._req('ifconfig up')
self._req('thread start')
def stop(self):
"""Stop openthread
"""
self._req('thread stop')
self._req('ifconfig down')
def reset(self):
"""Reset openthread device, not equivalent to stop and start
"""
logger.debug('DUT> reset')
self._log and self.pause()
self._sendline('reset')
self._read()
self._log and self.resume()
def resume(self):
"""Start dumping logs"""
self._lock.release()
def pause(self):
"""Start dumping logs"""
self._lock.acquire()
@property
def networkname(self):
"""str: Thread network name."""
return self._req('networkname')[0]
@networkname.setter
def networkname(self, value):
self._req('networkname %s' % value)
@property
def mode(self):
"""str: Thread mode."""
return self._req('mode')[0]
@mode.setter
def mode(self, value):
self._req('mode %s' % value)
@property
def mac(self):
"""str: MAC address of the device"""
return self._req('extaddr')[0]
@property
def addrs(self):
"""[str]: IP addresses of the devices"""
return self._req('ipaddr')
@property
def short_addr(self):
"""str: Short address"""
return self._req('rloc16')[0]
@property
def channel(self):
"""int: Channel number of openthread"""
return int(self._req('channel')[0])
@channel.setter
def channel(self, value):
self._req('channel %d' % value)
@property
def panid(self):
"""str: Thread panid"""
return self._req('panid')[0]
@panid.setter
def panid(self, value):
self._req('panid %s' % value)
@property
def extpanid(self):
"""str: Thread extpanid"""
return self._req('extpanid')[0]
@extpanid.setter
def extpanid(self, value):
self._req('extpanid %s' % value)
@property
def child_timeout(self):
"""str: Thread child timeout in seconds"""
return self._req('childtimeout')[0]
@child_timeout.setter
def child_timeout(self, value):
self._req('childtimeout %d' % value)
@property
def version(self):
"""str: Open thread version"""
return self._req('version')[0]
def add_prefix(self, prefix, flags, prf):
"""Add network prefix.
Args:
prefix (str): network prefix.
flags (str): network prefix flags, please refer thread documentation for details
prf (str): network prf, please refer thread documentation for details
"""
self._req('prefix add %s %s %s' % (prefix, flags, prf))
time.sleep(1)
self._req('netdataregister')
def remove_prefix(self, prefix):
"""Remove network prefix.
"""
self._req('prefix remove %s' % prefix)
time.sleep(1)
self._req('netdataregister')
def enable_blacklist(self):
"""Enable blacklist feature"""
self._req('blacklist enable')
def add_blacklist(self, mac):
"""Add a mac address to blacklist"""
self._req('blacklist add %s' % mac)
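# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Demonstrates how OpenThreadController might be driven to bring up a Thread
# node. The serial port path and the network parameters are placeholder
# assumptions; substitute values that match the device under test.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    with OpenThreadController('/dev/ttyUSB0') as dut:  # assumed port path
        dut.networkname = 'DemoNet'
        dut.panid = '0xface'
        dut.channel = 11
        dut.start()  # 'ifconfig up' followed by 'thread start'
        logger.info('started: %s', dut.is_started())
        logger.info('addresses: %s', dut.addrs)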
|
bsd-3-clause
|
ArtsiomCh/tensorflow
|
tensorflow/contrib/grid_rnn/__init__.py
|
179
|
1060
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GridRNN cells
## This package provides classes for GridRNN
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import, line-too-long
from tensorflow.contrib.grid_rnn.python.ops.grid_rnn_cell import *
# pylint: enable=unused-import,wildcard-import,line-too-long
|
apache-2.0
|
philanthropy-u/edx-platform
|
common/lib/xmodule/xmodule/tests/test_poll.py
|
13
|
2371
|
# -*- coding: utf-8 -*-
"""Test for Poll Xmodule functional logic."""
from mock import Mock
from xmodule.poll_module import PollDescriptor
from . import LogicTest
from .test_import import DummySystem
class PollModuleTest(LogicTest):
"""Logic tests for Poll Xmodule."""
shard = 1
descriptor_class = PollDescriptor
raw_field_data = {
'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0},
'voted': False,
'poll_answer': ''
}
def test_bad_ajax_request(self):
# Make sure that answer for incorrect request is error json.
response = self.ajax_request('bad_answer', {})
self.assertDictEqual(response, {'error': 'Unknown Command!'})
def test_good_ajax_request(self):
# Make sure that ajax request works correctly.
response = self.ajax_request('No', {})
poll_answers = response['poll_answers']
total = response['total']
callback = response['callback']
self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1})
self.assertEqual(total, 2)
self.assertDictEqual(callback, {'objectName': 'Conditional'})
self.assertEqual(self.xmodule.poll_answer, 'No')
def test_poll_export_with_unescaped_characters_xml(self):
"""
Make sure that poll_module will export fine if its xml contains
unescaped characters.
"""
module_system = DummySystem(load_error_modules=True)
id_generator = Mock()
id_generator.target_course_id = self.xmodule.course_id
sample_poll_xml = '''
<poll_question display_name="Poll Question">
<p>How old are you?</p>
<answer id="less18">18</answer>
</poll_question>
'''
output = PollDescriptor.from_xml(sample_poll_xml, module_system, id_generator)
# Update the answer with invalid character.
invalid_characters_poll_answer = output.answers[0]
# Invalid less-than character.
invalid_characters_poll_answer['text'] = '< 18'
output.answers[0] = invalid_characters_poll_answer
output.save()
xml = output.definition_to_xml(None)
# Extract texts of all children.
child_texts = xml.xpath('//text()')
# Last index of child_texts contains text of answer tag.
self.assertEqual(child_texts[-1], '< 18')
|
agpl-3.0
|
golismero/golismero-devel
|
thirdparty_libs/nltk/corpus/reader/chunked.py
|
17
|
8145
|
# Natural Language Toolkit: Chunked Corpus Reader
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
A reader for corpora that contain chunked (and optionally tagged)
documents.
"""
import os.path, codecs
import nltk
from nltk.corpus.reader.bracket_parse import BracketParseCorpusReader
from nltk.tree import Tree
from nltk.tokenize import *
from nltk.chunk import tagstr2tree
from util import *
from api import *
class ChunkedCorpusReader(CorpusReader):
"""
Reader for chunked (and optionally tagged) corpora. Paragraphs
are split using a block reader. They are then tokenized into
sentences using a sentence tokenizer. Finally, these sentences
are parsed into chunk trees using a string-to-chunktree conversion
function. Each of these steps can be performed using a default
function or a custom function. By default, paragraphs are split
on blank lines; sentences are listed one per line; and sentences
are parsed into chunk trees using ``nltk.chunk.tagstr2tree``.
"""
def __init__(self, root, fileids, extension='',
str2chunktree=tagstr2tree,
sent_tokenizer=RegexpTokenizer('\n', gaps=True),
para_block_reader=read_blankline_block,
encoding=None):
"""
:param root: The root directory for this corpus.
:param fileids: A list or regexp specifying the fileids in this corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._cv_args = (str2chunktree, sent_tokenizer, para_block_reader)
"""Arguments for corpus views generated by this corpus: a tuple
(str2chunktree, sent_tokenizer, para_block_tokenizer)"""
def raw(self, fileids=None):
"""
:return: the given file(s) as a single string.
:rtype: str
"""
if fileids is None: fileids = self._fileids
elif isinstance(fileids, basestring): fileids = [fileids]
return concat([self.open(f).read() for f in fileids])
def words(self, fileids=None):
"""
:return: the given file(s) as a list of words
and punctuation symbols.
:rtype: list(str)
"""
return concat([ChunkedCorpusView(f, enc, 0, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences or utterances, each encoded as a list of word
strings.
:rtype: list(list(str))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of word strings.
:rtype: list(list(list(str)))
"""
return concat([ChunkedCorpusView(f, enc, 0, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and punctuation symbols, encoded as tuples
``(word,tag)``.
:rtype: list(tuple(str,str))
"""
return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a list of ``(word,tag)`` tuples.
:rtype: list(list(tuple(str,str)))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def tagged_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as lists of ``(word,tag)`` tuples.
:rtype: list(list(list(tuple(str,str))))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 0, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_words(self, fileids=None):
"""
:return: the given file(s) as a list of tagged
words and chunks. Words are encoded as ``(word, tag)``
tuples (if the corpus has tags) or word strings (if the
corpus has no tags). Chunks are encoded as depth-one
trees over ``(word,tag)`` tuples or word strings.
:rtype: list(tuple(str,str) and Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 0, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_sents(self, fileids=None):
"""
:return: the given file(s) as a list of
sentences, each encoded as a shallow Tree. The leaves
of these trees are encoded as ``(word, tag)`` tuples (if
the corpus has tags) or word strings (if the corpus has no
tags).
:rtype: list(Tree)
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 0, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def chunked_paras(self, fileids=None):
"""
:return: the given file(s) as a list of
paragraphs, each encoded as a list of sentences, which are
in turn encoded as a shallow Tree. The leaves of these
trees are encoded as ``(word, tag)`` tuples (if the corpus
has tags) or word strings (if the corpus has no tags).
:rtype: list(list(Tree))
"""
return concat([ChunkedCorpusView(f, enc, 1, 1, 1, 1, *self._cv_args)
for (f, enc) in self.abspaths(fileids, True)])
def _read_block(self, stream):
return [tagstr2tree(t) for t in read_blankline_block(stream)]
class ChunkedCorpusView(StreamBackedCorpusView):
def __init__(self, fileid, encoding, tagged, group_by_sent,
group_by_para, chunked, str2chunktree, sent_tokenizer,
para_block_reader):
StreamBackedCorpusView.__init__(self, fileid, encoding=encoding)
self._tagged = tagged
self._group_by_sent = group_by_sent
self._group_by_para = group_by_para
self._chunked = chunked
self._str2chunktree = str2chunktree
self._sent_tokenizer = sent_tokenizer
self._para_block_reader = para_block_reader
def read_block(self, stream):
block = []
for para_str in self._para_block_reader(stream):
para = []
for sent_str in self._sent_tokenizer.tokenize(para_str):
sent = self._str2chunktree(sent_str)
# If requested, throw away the tags.
if not self._tagged:
sent = self._untag(sent)
# If requested, throw away the chunks.
if not self._chunked:
sent = sent.leaves()
# Add the sentence to `para`.
if self._group_by_sent:
para.append(sent)
else:
para.extend(sent)
# Add the paragraph to `block`.
if self._group_by_para:
block.append(para)
else:
block.extend(para)
# Return the block
return block
def _untag(self, tree):
for i, child in enumerate(tree):
if isinstance(child, Tree):
self._untag(child)
elif isinstance(child, tuple):
tree[i] = child[0]
else:
raise ValueError('expected child to be Tree or tuple')
return tree
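# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Shows how ChunkedCorpusReader might be pointed at a directory of chunked,
# tagged files. The corpus root and the '.pos' fileid pattern are assumptions
# made purely for illustration.
if __name__ == '__main__':
    reader = ChunkedCorpusReader('/tmp/my_chunked_corpus', r'.*\.pos')
    # First few sentences as shallow chunk Trees over (word, tag) leaves.
    for tree in reader.chunked_sents()[:3]:
        print(tree)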
|
gpl-2.0
|
pkreissl/espresso
|
testsuite/scripts/samples/test_visualization_elc.py
|
4
|
1698
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
def disable_GUI(code):
# integrate without visualizer
breakpoint = "visualizer.run(1)"
assert breakpoint in code
code = code.replace(breakpoint, "steps=1\nsystem.integrator.run(steps)", 1)
return code
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/visualization_elc.py",
substitutions=disable_GUI, steps=5000)
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = sample.system
def test_dipole_moment(self):
import espressomd.observables
obs = espressomd.observables.DipoleMoment(ids=self.system.part[:].id)
dipm = obs.calculate()
self.assertLess(dipm[2], 0, msg="charges moved in the wrong direction")
# the dipole moment should be the strongest along the z-axis
self.assertGreater(abs(dipm[2]), abs(dipm[0]))
self.assertGreater(abs(dipm[2]), abs(dipm[1]))
if __name__ == "__main__":
ut.main()
|
gpl-3.0
|
yongju-hong/thrift
|
test/features/util.py
|
41
|
1256
|
import argparse
import socket
from local_thrift import thrift # noqa
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport, TFramedTransport
from thrift.transport.THttpClient import THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from thrift.protocol.TCompactProtocol import TCompactProtocol
from thrift.protocol.TJSONProtocol import TJSONProtocol
def add_common_args(p):
p.add_argument('--host', default='localhost')
p.add_argument('--port', type=int, default=9090)
p.add_argument('--protocol', default='binary')
p.add_argument('--transport', default='buffered')
p.add_argument('--ssl', action='store_true')
def parse_common_args(argv):
p = argparse.ArgumentParser()
add_common_args(p)
return p.parse_args(argv)
def init_protocol(args):
sock = TSocket(args.host, args.port, socket_family=socket.AF_INET)
sock.setTimeout(500)
trans = {
'buffered': TBufferedTransport,
'framed': TFramedTransport,
'http': THttpClient,
}[args.transport](sock)
trans.open()
return {
'binary': TBinaryProtocol,
'compact': TCompactProtocol,
'json': TJSONProtocol,
}[args.protocol](trans)
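# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Shows how the helpers above are typically combined: parse the common options
# and build the matching transport/protocol stack. Running it assumes a Thrift
# test server is already listening on the chosen host and port.
if __name__ == '__main__':
    import sys
    args = parse_common_args(sys.argv[1:])
    proto = init_protocol(args)
    print('connected to %s:%d using %s transport and %s protocol'
          % (args.host, args.port, args.transport, args.protocol))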
|
apache-2.0
|
sharmaking/DataApi
|
DataApi_Linux/dataProcess.py
|
7
|
1215
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#dataProcess
import multiprocessing
import dataApi
class CDataProcess(object):
def __init__(self, HOST, PORT, isAllMarket, subStocks, requestType, flag, startTime, endTime):
super(CDataProcess, self).__init__()
self.name = "Socket Data Process"
        self.bufferStack = { # one stack per contract
            "Multiple"  : multiprocessing.Queue(),  # multi-signal strategy
            "__SystemMessage__": multiprocessing.Queue()  # system messages
}
self.isAllMarket, self.subStocks, self.requestType, self.flag, self.startTime, self.endTime \
= isAllMarket, subStocks, requestType, flag, startTime, endTime
self.dataSocketServerApi = dataApi.CDataApi(HOST, PORT, self.bufferStack)
self.creatBufferStack()
    # create the buffer objects
def creatBufferStack(self):
for stock in self.subStocks:
self.bufferStack[stock] = multiprocessing.Queue()
    # start receiving data
def run(self):
self.dataSocketServerApi.connectServer()
        # subscribe to stocks
self.dataSocketServerApi.subscibeStock(self.isAllMarket, self.subStocks)
        # request data with the given parameters
self.dataSocketServerApi.requestData(self.requestType, self.flag, self.startTime, self.endTime)
        # start receiving data
self.dataSocketServerApi.run()
|
mit
|
mattskone/garage_alarm
|
run.py
|
2
|
1439
|
import logging
import os
import shutil
import alerts
import camera
import config
import features
import models
logging.basicConfig(filename=os.path.join(config.INSTALL_DIR, 'app.log'),
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
model = models.get_trained_model()
new_trial_dir = os.path.join(config.INSTALL_DIR, config.NEW_TRIAL_DIR)
c = camera.Camera(new_trial_dir)
new_trial_file_name = c.take_photo()
trial_full_file_name = os.path.join(new_trial_dir, new_trial_file_name)
logger.info('Classifying new trial {0}'.format(new_trial_file_name))
new_trial_features = features.get_features_for_image(trial_full_file_name)
labels = model.predict(new_trial_features)
if labels[0] == 0:
shutil.move(trial_full_file_name,
os.path.join(config.INSTALL_DIR,
config.NEGATIVE_TRIAL_DIR,
new_trial_file_name))
logger.info('Classified negative')
else:
shutil.move(trial_full_file_name,
os.path.join(config.INSTALL_DIR,
config.POSITIVE_TRIAL_DIR,
new_trial_file_name))
alerts.trigger_alert()
logger.info('Classified positive')
if __name__ == '__main__':
main()
|
mit
|
materialsproject/pymatgen
|
pymatgen/analysis/chemenv/utils/tests/test_chemenv_config.py
|
4
|
2031
|
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import shutil
import unittest
from monty.tempfile import ScratchDir
from pymatgen.core import SETTINGS
from pymatgen.analysis.chemenv.utils.chemenv_config import ChemEnvConfig
config_file_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"config",
)
class ChemenvConfigTest(unittest.TestCase):
def test_chemenv_config(self):
with ScratchDir("."):
config = ChemEnvConfig()
if SETTINGS.get("PMG_MAPI_KEY", "") != "":
self.assertTrue(config.has_materials_project_access)
else:
self.assertFalse(config.has_materials_project_access)
package_options = ChemEnvConfig.DEFAULT_PACKAGE_OPTIONS
package_options["default_max_distance_factor"] = 1.8
config = ChemEnvConfig(package_options=package_options)
self.assertEqual(
config.package_options_description(),
"Package options :\n"
" - Maximum distance factor : 1.8000\n"
' - Default strategy is "SimplestChemenvStrategy" :\n'
" Simplest ChemenvStrategy using fixed angle and distance parameters \n"
" for the definition of neighbors in the Voronoi approach. \n"
" The coordination environment is then given as the one with the \n"
" lowest continuous symmetry measure.\n"
" with options :\n"
" - distance_cutoff : 1.4\n"
" - angle_cutoff : 0.3\n"
" - additional_condition : 1\n"
" - continuous_symmetry_measure_cutoff : 10.0\n",
)
config.save(root_dir="tmp_dir")
config = config.auto_load(root_dir="tmp_dir")
self.assertEqual(config.package_options, package_options)
if __name__ == "__main__":
unittest.main()
|
mit
|
subins2000/TorrentBro
|
torrentbro/lib/tpb/constants.py
|
1
|
3066
|
import sys
if sys.version_info >= (3, 0):
class_type = type
else:
from new import classobj
class_type = classobj
class ConstantType(type):
"""
Tree representation metaclass for class attributes. Metaclass is extended
to all child classes too.
"""
def __new__(cls, clsname, bases, dct):
"""
Extend metaclass to all class attributes too.
"""
attrs = {}
for name, attr in dct.items():
if isinstance(attr, class_type):
# substitute attr with a new class with Constants as
# metaclass making it possible to spread this same method
# to all child classes
attr = ConstantType(
attr.__name__, attr.__bases__, attr.__dict__)
attrs[name] = attr
return super(ConstantType, cls).__new__(cls, clsname, bases, attrs)
def __repr__(cls):
"""
Tree representation of class attributes. Child classes are also
represented.
"""
# dump current class name
tree = cls.__name__ + ':\n'
for name in dir(cls):
if not name.startswith('_'):
attr = getattr(cls, name)
output = repr(attr)
if not isinstance(attr, ConstantType):
output = '{}: {}'.format(name, output)
# indent all child attrs
tree += '\n'.join([' ' * 4 +
line for line in output.splitlines()]) + '\n'
return tree
def __str__(cls):
return repr(cls)
Constants = ConstantType('Constants', (object,), {})
class ORDERS(Constants):
class NAME:
DES = 1
ASC = 2
class UPLOADED:
DES = 3
ASC = 4
class SIZE:
DES = 5
ASC = 6
class SEEDERS:
DES = 7
ASC = 8
class LEECHERS:
DES = 9
ASC = 10
class UPLOADER:
DES = 11
ASC = 12
class TYPE:
DES = 13
ASC = 14
class CATEGORIES(Constants):
ALL = 0
class AUDIO:
ALL = 100
MUSIC = 101
AUDIO_BOOKS = 102
SOUND_CLIPS = 103
FLAC = 104
OTHER = 199
class VIDEO:
ALL = 200
MOVIES = 201
MOVIES_DVDR = 202
MUSIC_VIDEOS = 203
MOVIE_CLIPS = 204
TV_SHOWS = 205
HANDHELD = 206
HD_MOVIES = 207
HD_TV_SHOWS = 208
THREE_DIMENSIONS = 209
OTHER = 299
class APPLICATIONS:
ALL = 300
WINDOWS = 301
MAC = 302
UNIX = 303
HANDHELD = 304
IOS = 305
ANDROID = 306
OTHER = 399
class GAMES:
ALL = 400
PC = 401
MAC = 402
PSX = 403
XBOX360 = 404
WII = 405
HANDHELD = 406
IOS = 407
ANDROID = 408
OTHER = 499
class OTHER:
EBOOKS = 601
COMICS = 602
PICTURES = 603
COVERS = 604
PHYSIBLES = 605
OTHER = 699
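# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Demonstrates the two things ConstantType provides: nested constant lookup and
# a tree-formatted repr of the whole hierarchy.
if __name__ == '__main__':
    print(CATEGORIES.VIDEO.HD_MOVIES)  # 207
    print(ORDERS.SEEDERS.DES)          # 7
    print(CATEGORIES)                  # indented tree of every category constant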
|
gpl-3.0
|
hashworks/CouchPotatoServer
|
libs/pyutil/humanreadable.py
|
106
|
4483
|
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import exceptions, os
from repr import Repr
class BetterRepr(Repr):
def __init__(self):
Repr.__init__(self)
# Note: These levels can get adjusted dynamically! My goal is to get more info when printing important debug stuff like exceptions and stack traces and less info when logging normal events. --Zooko 2000-10-14
self.maxlevel = 6
self.maxdict = 6
self.maxlist = 6
self.maxtuple = 6
self.maxstring = 300
self.maxother = 300
def repr_function(self, obj, level):
if hasattr(obj, 'func_code'):
return '<' + obj.func_name + '() at ' + os.path.basename(obj.func_code.co_filename) + ':' + str(obj.func_code.co_firstlineno) + '>'
else:
return '<' + obj.func_name + '() at (builtin)'
def repr_instance_method(self, obj, level):
if hasattr(obj, 'func_code'):
return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at ' + os.path.basename(obj.im_func.func_code.co_filename) + ':' + str(obj.im_func.func_code.co_firstlineno) + '>'
else:
return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at (builtin)'
def repr_long(self, obj, level):
s = `obj` # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)/2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
if s[-1] == 'L':
return s[:-1]
return s
def repr_instance(self, obj, level):
"""
If it is an instance of Exception, format it nicely (trying to emulate
the format that you see when an exception is actually raised, plus
bracketing '<''s). If it is an instance of dict call self.repr_dict()
on it. If it is an instance of list call self.repr_list() on it. Else
call Repr.repr_instance().
"""
if isinstance(obj, exceptions.Exception):
# Don't cut down exception strings so much.
tms = self.maxstring
self.maxstring = max(512, tms * 4)
tml = self.maxlist
self.maxlist = max(12, tml * 4)
try:
if hasattr(obj, 'args'):
if len(obj.args) == 1:
return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args[0], level-1) + '>'
else:
return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args, level-1) + '>'
else:
return '<' + obj.__class__.__name__ + '>'
finally:
self.maxstring = tms
self.maxlist = tml
if isinstance(obj, dict):
return self.repr_dict(obj, level)
if isinstance(obj, list):
return self.repr_list(obj, level)
return Repr.repr_instance(self, obj, level)
def repr_list(self, obj, level):
"""
copied from standard repr.py and fixed to work on multithreadedly mutating lists.
"""
if level <= 0: return '[...]'
n = len(obj)
myl = obj[:min(n, self.maxlist)]
s = ''
for item in myl:
entry = self.repr1(item, level-1)
if s: s = s + ', '
s = s + entry
if n > self.maxlist: s = s + ', ...'
return '[' + s + ']'
def repr_dict(self, obj, level):
"""
copied from standard repr.py and fixed to work on multithreadedly mutating dicts.
"""
if level <= 0: return '{...}'
s = ''
n = len(obj)
items = obj.items()[:min(n, self.maxdict)]
items.sort()
for key, val in items:
entry = self.repr1(key, level-1) + ':' + self.repr1(val, level-1)
if s: s = s + ', '
s = s + entry
if n > self.maxdict: s = s + ', ...'
return '{' + s + '}'
# This object can be changed by other code updating this module's "brepr"
# variables. This is so that (a) code can use humanreadable with
# "from humanreadable import hr; hr(mything)", and (b) code can override
# humanreadable to provide application-specific human readable output
# (e.g. libbase32's base32id.AbbrevRepr).
brepr = BetterRepr()
def hr(x):
return brepr.repr(x)
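# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Mirrors the usage described in the comment above: import hr() and pass it
# arbitrary objects to get a bounded, human-readable representation.
if __name__ == '__main__':
    print(hr({'answer': 42, 'items': range(100)}))  # long containers are truncated
    try:
        raise ValueError('something went wrong')
    except ValueError as e:
        print(hr(e))  # exceptions are shown with their class name and args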
|
gpl-3.0
|
multipath-tcp/mptcp_3.0.x
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
15048
|
# Display the processing of packets and the time spent at each stage.
# It helps us investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
        # check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
        # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
gpl-2.0
|
spallavolu/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
jbedorf/tensorflow
|
tensorflow/python/training/momentum_test.py
|
18
|
27623
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, name="var1_%d" % i)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
momentum = lambda: 0.9
if not use_callable_params:
learning_rate = learning_rate()
momentum = momentum()
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
if not context.executing_eagerly():
self.assertFalse(slot0 in variables.trainable_variables())
self.assertFalse(slot1 in variables.trainable_variables())
      # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
if not context.executing_eagerly():
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
if context.executing_eagerly():
mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
else:
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testVariablesAcrossGraphs(self):
optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
with ops.Graph().as_default():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var0")
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var1")
loss = math_ops.reduce_sum(var0 + var1)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var0")
self.assertStartsWith(optimizer_variables[1].name, "var1")
self.assertEquals(2, len(optimizer_variables))
with ops.Graph().as_default():
var2 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var2")
var3 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var3")
loss = math_ops.reduce_sum(var2 + var3)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var2")
self.assertStartsWith(optimizer_variables[1].name, "var3")
self.assertEquals(2, len(optimizer_variables))
@test_util.run_deprecated_v1
def testNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
cost = 5 * var0 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
grads = []
for t in range(1, 5):
grads.append(var0_np * 10)
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
loss = 5 * var0 * var0 + 3 * var1
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
x_feed = array_ops.placeholder(dtype)
y_feed = ops.IndexedSlices(
x_feed, constant_op.constant([0, 1]), constant_op.constant([2]))
grads_and_vars = [(y_feed, var0), (constant_op.constant(
[3.0, 3.0], dtype=dtype), var1)]
opt_update = mom_op.apply_gradients(grads_and_vars)
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_update.run(feed_dict={x_feed: grads[t - 1]})
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# This test invokes the ResourceSparseApplyMomentum operation, which
# did not have a registered GPU kernel as of April 2018. With graph
# execution, the placement algorithm notices this and automatically
# places the variable in CPU (host) memory. With eager execution,
# the variable would be placed in GPU memory if available, which
# would then conflict with the future invocation of the
# ResourceSparseApplyMomentum operation.
# To work around this discrepancy, for now we force the variable
# to be placed on CPU.
with ops.device("/cpu:0"):
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loss():
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
return pred * pred
# pylint: enable=cell-var-from-loop
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeWith2DIndiciesForEmbeddingLookup(self):
# This test invokes the ResourceSparseApplyMomentum operation, which
# did not have a registered GPU kernel as of April 2018. With graph
# execution, the placement algorithm notices this and automatically
# places the variable in CPU (host) memory. With eager execution,
# the variable would be placed in GPU memory if available, which
# would then conflict with the future invocation of the
# ResourceSparseApplyMomentum operation.
# To work around this discrepancy, for now we force the variable
# to be placed on CPU.
with ops.device("/cpu:0"):
var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
def loss():
return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(sgd_op)
self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0))
@test_util.run_deprecated_v1
def testTensorLearningRateAndMomentum(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the momentum accumulators were 0, so we should see a normal
# update: v -= grad * learning_rate
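# (Illustrative note, not part of the original test: the classic momentum
# rule these checks assume is
#     accum = momentum * accum + grad
#     var  -= learning_rate * accum
# so after the first step accum equals grad and var becomes var - lr * grad.)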
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
Return values have been generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
@test_util.run_deprecated_v1
def testLikeDistBeliefMom01(self):
with self.cached_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), self.evaluate(var0))
@test_util.run_deprecated_v1
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = ops.IndexedSlices(
constant_op.constant(
[[.1, .1]], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([4, 2]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]),
constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], self.evaluate(var0)[0])
self.assertAllClose([0, 0], self.evaluate(var0)[1])
self.assertAllClose([1, 1], self.evaluate(var1)[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([.1, .1]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]),
self.evaluate(var1)[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]),
self.evaluate(var1)[2])
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the momentum accumulators were 0, so we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ilya-epifanov/ansible
|
contrib/inventory/ssh_config.py
|
41
|
3977
|
#!/usr/bin/env python
# (c) 2014, Tomas Karasek <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use aliases from ~/.ssh/config.
#
# There were some issues with various Paramiko versions. I took a deeper look
# and tested heavily. Now, ansible parses this alright with Paramiko versions
# 1.7.2 to 1.15.2.
#
# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts
# with their alias, rather than with the IP or hostname. It takes advantage
# of the ansible_ssh_{host,port,user,private_key_file} variables.
#
# If you have in your .ssh/config:
# Host git
# HostName git.domain.org
# User tkarasek
# IdentityFile /home/tomk/keys/thekey
#
# You can do
# $ ansible git -m ping
#
# Example invocation:
# ssh_config.py --list
# ssh_config.py --host <alias>
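#
# Illustrative --list output (a sketch only; hosts and values depend on your
# actual ~/.ssh/config, and the top-level group name comes from _key below):
#
#   {"ssh_config": ["git"],
#    "_meta": {"hostvars": {"git": {
#        "ansible_ssh_user": "tkarasek",
#        "ansible_ssh_host": "git.domain.org",
#        "ansible_ssh_private_key_file": "/home/tomk/keys/thekey"}}}}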
import argparse
import os.path
import sys
import paramiko
try:
import json
except ImportError:
import simplejson as json
SSH_CONF = '~/.ssh/config'
_key = 'ssh_config'
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
def get_config():
if not os.path.isfile(os.path.expanduser(SSH_CONF)):
return {}
with open(os.path.expanduser(SSH_CONF)) as f:
cfg = paramiko.SSHConfig()
cfg.parse(f)
ret_dict = {}
for d in cfg._config:
if type(d['host']) is list:
alias = d['host'][0]
else:
alias = d['host']
if ('?' in alias) or ('*' in alias):
continue
_copy = dict(d)
del _copy['host']
if 'config' in _copy:
ret_dict[alias] = _copy['config']
else:
ret_dict[alias] = _copy
return ret_dict
def print_list():
cfg = get_config()
meta = {'hostvars': {}}
for alias, attributes in cfg.items():
tmp_dict = {}
for ssh_opt, ans_opt in _ssh_to_ansible:
if ssh_opt in attributes:
# If the attribute is a list, just take the first element.
# Private key is returned in a list for some reason.
attr = attributes[ssh_opt]
if type(attr) is list:
attr = attr[0]
tmp_dict[ans_opt] = attr
if tmp_dict:
meta['hostvars'][alias] = tmp_dict
print json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})
def print_host(host):
cfg = get_config()
print json.dumps(cfg[host])
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script parsing .ssh/config')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from .ssh/config inventory'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
|
gpl-3.0
|
mynext/svgedit
|
build/tools/ship.py
|
62
|
4264
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ship.py
#
# Licensed under the Apache 2 License as is the rest of the project
# Copyright (c) 2011 Jeff Schiller
#
# This script has very little real-world application. It is only used in our pure-client web app
# served on GoogleCode so we can have one HTML file, run a build script and generate a 'release'
# version without having to maintain two separate HTML files. It does this by evaluating
# 'processing comments' that are suspiciously similar to IE conditional comments and then outputting
# a new HTML file after evaluating particular variables.
#
# This script takes the following inputs:
#
# * a HTML file (--i=in.html)
# * a series of flag names (--on=Foo --on=Bar)
#
# Example:
#
# in.html:
# <!--{if foo}>
# FOO!
# <!{else}-->
# BAR!
# <!--{endif}-->
#
# $ ship.py --i in.html --on foo
#
# out.html:
# <!--{if foo}-->
# FOO!
# <!--{else}>
# BAR!
# <!{endif}-->
#
# It has the following limitations:
#
# 1) Only if-else-endif are currently supported.
# 2) All processing comments must be on one line with no other non-whitespace characters.
# 3) Comments cannot be nested.
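#
# The same rewrite can also be driven from Python rather than the command
# line (an illustrative sketch, not part of the original script; 'in.html'
# is a hypothetical input file):
#
#   out_html = ship('in.html', ['foo'])
#   print out_html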
import optparse
import os
inside_if = False
last_if_true = False
_options_parser = optparse.OptionParser(
usage='%prog --i input.html [--on flag1]',
description=('Rewrites an HTML file based on conditional comments and flags'))
_options_parser.add_option('--i',
action='store', dest='input_html_file', help='Input HTML filename')
_options_parser.add_option('--on',
action='append', type='string', dest='enabled_flags',
help='name of flag to enable')
def parse_args(args=None):
options, rargs = _options_parser.parse_args(args)
return options, (None, None)
def parseComment(line, line_num, enabled_flags):
global inside_if
global last_if_true
start = line.find('{')
end = line.find('}')
statement = line[start+1:end].strip()
if statement.startswith('if '):
if inside_if == True:
print 'Fatal Error: Nested {if} found on line ' + str(line_num)
print line
quit()
# Evaluate whether the expression is true/false.
# only one variable name allowed for now
variable_name = statement[3:].strip()
if variable_name in enabled_flags:
last_if_true = True
line = '<!--{if ' + variable_name + '}-->'
else:
last_if_true = False
line = '<!--{if ' + variable_name + '}>'
inside_if = True
elif statement == 'else':
if inside_if == False:
print 'Fatal Error: {else} found without {if} on line ' + str(line_num)
print line
quit()
if inside_if == 'else':
print 'Fatal Error: Multiple {else} clauses found in the same if on line ' + str(line_num)
print line
quit()
if last_if_true:
line = '<!--{else}>'
else:
line = '<!{else}-->'
# invert the logic so the endif clause is closed properly
last_if_true = not last_if_true
# ensure we don't have two else statements in the same if
inside_if = 'else'
elif statement == 'endif':
if inside_if == False:
print 'Fatal Error: {endif} found without {if} on line ' + str(line_num)
print line
quit()
if last_if_true:
line = '<!--{endif}-->'
else:
line = '<!{endif}-->'
inside_if = False
return line
def ship(inFileName, enabled_flags):
# read in HTML file
lines = file(inFileName, 'r').readlines()
out_lines = []
i = 0
# loop for each line of markup
for line in lines:
strline = line.strip()
# if we find a comment, process it and print out
if strline.startswith('<!--{') or strline.startswith('<!{'):
# using the same indentation as the previous line
start = line.find('<')
out_lines.append(line[:start] \
+ parseComment(strline, i, enabled_flags) \
+ os.linesep)
else: # else append line to the output list
out_lines.append(line)
i += 1
return ''.join(out_lines)
if __name__ == '__main__':
options, (input, output) = parse_args()
if options.input_html_file != None:
enabled_flags = []
if options.enabled_flags != None:
enabled_flags.extend(options.enabled_flags)
out_file = ship(options.input_html_file, enabled_flags)
print out_file
|
mit
|
jackylee0424/dfr
|
tornado/test/simple_httpclient_test.py
|
1
|
16087
|
from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _DEFAULT_CA_CERTS
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import RequestHandler, Application, asynchronous, url
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "7")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_DEFAULT_CA_CERTS).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present')
def test_ipv6(self):
try:
self.http_server.listen(self.get_http_port(), address='::1')
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = self.get_url("/hello").replace("localhost", "[::1]")
# ipv6 is currently disabled by default and must be explicitly requested
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop, allow_ipv6=True)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def test_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
server_socket, port = bind_unused_port()
server_socket.close()
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://localhost:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
self.assertTrue(str(errno.ECONNREFUSED) in str(response.error),
response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
self.assertEqual(res.body, b'A')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
|
mit
|
asgard-lab/neutron
|
neutron/agent/l3/link_local_allocator.py
|
23
|
2237
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron.agent.l3.item_allocator import ItemAllocator
class LinkLocalAddressPair(netaddr.IPNetwork):
def __init__(self, addr):
super(LinkLocalAddressPair, self).__init__(addr)
def get_pair(self):
"""Builds an address pair from the first and last addresses. """
# TODO(kevinbenton): the callers of this seem to be interested only in an IP,
# so we should just return two IPAddresses.
return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen)))
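# Illustrative example (not part of the original module): for the /31 pair
# LinkLocalAddressPair('169.254.0.0/31'), get_pair() returns
#   (IPNetwork('169.254.0.0/31'), IPNetwork('169.254.0.1/31'))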
class LinkLocalAllocator(ItemAllocator):
"""Manages allocation of link local IP addresses.
These link local addresses are used for routing inside the fip namespaces.
The associations need to persist across agent restarts to maintain
consistency. Without this, there is disruption in network connectivity
as the agent rewires the connections with the new IP address associations.
Persisting these in the database is unnecessary and would degrade
performance.
"""
def __init__(self, data_store_path, subnet):
"""Create the necessary pool and item allocator
using ',' as the delimiter and LinkLocalAllocator as the
class type
"""
subnet = netaddr.IPNetwork(subnet)
pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
super(LinkLocalAllocator, self).__init__(data_store_path,
LinkLocalAddressPair,
pool)
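# Illustrative usage sketch (not part of the original module; the path and
# subnet below are hypothetical, and allocate()/release() come from the
# ItemAllocator base class):
#
#   allocator = LinkLocalAllocator('/tmp/fip-linklocal.txt', '169.254.64.0/18')
#   pair = allocator.allocate('router-uuid')
#   allocator.release('router-uuid')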
|
apache-2.0
|
zouyapeng/horizon-newtouch
|
openstack_dashboard/dashboards/project/instances/urls.py
|
7
|
1896
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.instances import views
INSTANCES = r'^(?P<instance_id>[^/]+)/%s$'
INSTANCES_KEYPAIR = r'^(?P<instance_id>[^/]+)/(?P<keypair_name>[^/]+)/%s$'
VIEW_MOD = 'openstack_dashboard.dashboards.project.instances.views'
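# For example, INSTANCES % 'update' expands to r'^(?P<instance_id>[^/]+)/update$',
# and INSTANCES_KEYPAIR additionally captures keypair_name from the URL.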
urlpatterns = patterns(VIEW_MOD,
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^launch$', views.LaunchInstanceView.as_view(), name='launch'),
url(r'^(?P<instance_id>[^/]+)/$',
views.DetailView.as_view(), name='detail'),
url(INSTANCES % 'update', views.UpdateView.as_view(), name='update'),
url(INSTANCES % 'rebuild', views.RebuildView.as_view(), name='rebuild'),
url(INSTANCES % 'console', 'console', name='console'),
url(INSTANCES % 'vnc', 'vnc', name='vnc'),
url(INSTANCES % 'spice', 'spice', name='spice'),
url(INSTANCES % 'rdp', 'rdp', name='rdp'),
url(INSTANCES % 'resize', views.ResizeView.as_view(), name='resize'),
url(INSTANCES_KEYPAIR % 'decryptpassword',
views.DecryptPasswordView.as_view(), name='decryptpassword'),
)
|
apache-2.0
|
dparlevliet/zelenka-report-storage
|
server-db/twisted/trial/_asynctest.py
|
8
|
14387
|
# -*- test-case-name: twisted.trial.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Things likely to be used by writers of unit tests.
Maintainer: Jonathan Lange
"""
from __future__ import division, absolute_import
import warnings
from zope.interface import implementer
# We can't import reactor at module-level because this code runs before trial
# installs a user-specified reactor, installing the default reactor and
# breaking reactor installation. See also #6047.
from twisted.internet import defer, utils
from twisted.python import failure
from twisted.trial import itrial, util
from twisted.trial._synctest import (
FailTest, SkipTest, SynchronousTestCase)
_wait_is_running = []
@implementer(itrial.ITestCase)
class TestCase(SynchronousTestCase):
"""
A unit test. The atom of the unit testing universe.
This class extends L{SynchronousTestCase} which extends C{unittest.TestCase}
from the standard library. The main feature is the ability to return
C{Deferred}s from tests and fixture methods and to have the suite wait for
those C{Deferred}s to fire. Also provides new assertions such as
L{assertFailure}.
@ivar timeout: A real number of seconds. If set, the test will
raise an error if it takes longer than C{timeout} seconds.
If not set, util.DEFAULT_TIMEOUT_DURATION is used.
"""
def __init__(self, methodName='runTest'):
"""
Construct an asynchronous test case for C{methodName}.
@param methodName: The name of a method on C{self}. This method should
be a unit test. That is, it should be a short method that calls some of
the assert* methods. If C{methodName} is unspecified,
L{SynchronousTestCase.runTest} will be used as the test method. This is
mostly useful for testing Trial.
"""
super(TestCase, self).__init__(methodName)
def assertFailure(self, deferred, *expectedFailures):
"""
Fail if C{deferred} does not errback with one of C{expectedFailures}.
Returns the original Deferred with callbacks added. You will need
to return this Deferred from your test case.
"""
def _cb(ignore):
raise self.failureException(
"did not catch an error, instead got %r" % (ignore,))
def _eb(failure):
if failure.check(*expectedFailures):
return failure.value
else:
output = ('\nExpected: %r\nGot:\n%s'
% (expectedFailures, str(failure)))
raise self.failureException(output)
return deferred.addCallbacks(_cb, _eb)
failUnlessFailure = assertFailure
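# Illustrative usage (not from the original class): a test method returns
# the Deferred that assertFailure hands back, e.g.
#
#   def test_divisionByZero(self):
#       d = defer.maybeDeferred(lambda: 1 / 0)
#       return self.assertFailure(d, ZeroDivisionError)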
def _run(self, methodName, result):
from twisted.internet import reactor
timeout = self.getTimeout()
def onTimeout(d):
e = defer.TimeoutError("%r (%s) still running at %s secs"
% (self, methodName, timeout))
f = failure.Failure(e)
# try to errback the deferred that the test returns (for no gorram
# reason) (see issue1005 and test_errorPropagation in
# test_deferred)
try:
d.errback(f)
except defer.AlreadyCalledError:
# if the deferred has been called already but the *back chain
# is still unfinished, crash the reactor and report timeout
# error ourself.
reactor.crash()
self._timedOut = True # see self._wait
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
else:
result.addError(self, f)
onTimeout = utils.suppressWarnings(
onTimeout, util.suppress(category=DeprecationWarning))
method = getattr(self, methodName)
d = defer.maybeDeferred(
utils.runWithWarningsSuppressed, self._getSuppress(), method)
call = reactor.callLater(timeout, onTimeout, d)
d.addBoth(lambda x : call.active() and call.cancel() or x)
return d
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def deferSetUp(self, ignored, result):
d = self._run('setUp', result)
d.addCallbacks(self.deferTestMethod, self._ebDeferSetUp,
callbackArgs=(result,),
errbackArgs=(result,))
return d
def _ebDeferSetUp(self, failure, result):
if failure.check(SkipTest):
result.addSkip(self, self._getSkipReason(self.setUp, failure.value))
else:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
return self.deferRunCleanups(None, result)
def deferTestMethod(self, ignored, result):
d = self._run(self._testMethodName, result)
d.addCallbacks(self._cbDeferTestMethod, self._ebDeferTestMethod,
callbackArgs=(result,),
errbackArgs=(result,))
d.addBoth(self.deferRunCleanups, result)
d.addBoth(self.deferTearDown, result)
return d
def _cbDeferTestMethod(self, ignored, result):
if self.getTodo() is not None:
result.addUnexpectedSuccess(self, self.getTodo())
else:
self._passed = True
return ignored
def _ebDeferTestMethod(self, f, result):
todo = self.getTodo()
if todo is not None and todo.expected(f):
result.addExpectedFailure(self, f, todo)
elif f.check(self.failureException, FailTest):
result.addFailure(self, f)
elif f.check(KeyboardInterrupt):
result.addError(self, f)
result.stop()
elif f.check(SkipTest):
result.addSkip(
self,
self._getSkipReason(getattr(self, self._testMethodName), f.value))
else:
result.addError(self, f)
def deferTearDown(self, ignored, result):
d = self._run('tearDown', result)
d.addErrback(self._ebDeferTearDown, result)
return d
def _ebDeferTearDown(self, failure, result):
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def deferRunCleanups(self, ignored, result):
"""
Run any scheduled cleanups and report errors (if any) to the result
object.
"""
d = self._runCleanups()
d.addCallback(self._cbDeferRunCleanups, result)
return d
def _cbDeferRunCleanups(self, cleanupResults, result):
for flag, failure in cleanupResults:
if flag == defer.FAILURE:
result.addError(self, failure)
if failure.check(KeyboardInterrupt):
result.stop()
self._passed = False
def _cleanUp(self, result):
try:
clean = util._Janitor(self, result).postCaseCleanup()
if not clean:
self._passed = False
except:
result.addError(self, failure.Failure())
self._passed = False
for error in self._observer.getErrors():
result.addError(self, error)
self._passed = False
self.flushLoggedErrors()
self._removeObserver()
if self._passed:
result.addSuccess(self)
def _classCleanUp(self, result):
try:
util._Janitor(self, result).postClassCleanup()
except:
result.addError(self, failure.Failure())
def _makeReactorMethod(self, name):
"""
Create a method which wraps the reactor method C{name}. The new
method issues a deprecation warning and calls the original.
"""
def _(*a, **kw):
warnings.warn("reactor.%s cannot be used inside unit tests. "
"In the future, using %s will fail the test and may "
"crash or hang the test run."
% (name, name),
stacklevel=2, category=DeprecationWarning)
return self._reactorMethods[name](*a, **kw)
return _
def _deprecateReactor(self, reactor):
"""
Deprecate C{iterate}, C{crash} and C{stop} on C{reactor}. That is,
each method is wrapped in a function that issues a deprecation
warning, then calls the original.
@param reactor: The Twisted reactor.
"""
self._reactorMethods = {}
for name in ['crash', 'iterate', 'stop']:
self._reactorMethods[name] = getattr(reactor, name)
setattr(reactor, name, self._makeReactorMethod(name))
def _undeprecateReactor(self, reactor):
"""
Restore the deprecated reactor methods. Undoes what
L{_deprecateReactor} did.
@param reactor: The Twisted reactor.
"""
for name, method in self._reactorMethods.items():
setattr(reactor, name, method)
self._reactorMethods = {}
def _runCleanups(self):
"""
Run the cleanups added with L{addCleanup} in order.
@return: A C{Deferred} that fires when all cleanups are run.
"""
def _makeFunction(f, args, kwargs):
return lambda: f(*args, **kwargs)
callables = []
while len(self._cleanups) > 0:
f, args, kwargs = self._cleanups.pop()
callables.append(_makeFunction(f, args, kwargs))
return util._runSequentially(callables)
def _runFixturesAndTest(self, result):
"""
Really run C{setUp}, the test method, and C{tearDown}. Any of these may
return L{defer.Deferred}s. After they complete, do some reactor cleanup.
@param result: A L{TestResult} object.
"""
from twisted.internet import reactor
self._deprecateReactor(reactor)
self._timedOut = False
try:
d = self.deferSetUp(None, result)
try:
self._wait(d)
finally:
self._cleanUp(result)
self._classCleanUp(result)
finally:
self._undeprecateReactor(reactor)
def addCleanup(self, f, *args, **kwargs):
"""
Extend the base cleanup feature with support for cleanup functions which
return Deferreds.
If the function C{f} returns a Deferred, C{TestCase} will wait until the
Deferred has fired before proceeding to the next function.
"""
return super(TestCase, self).addCleanup(f, *args, **kwargs)
def getSuppress(self):
return self._getSuppress()
def getTimeout(self):
"""
Returns the timeout value set on this test. Checks on the instance
first, then the class, then the module, then packages. As soon as it
finds something with a C{timeout} attribute, returns that. Returns
L{util.DEFAULT_TIMEOUT_DURATION} if it cannot find anything. See
L{TestCase} docstring for more details.
"""
timeout = util.acquireAttribute(self._parents, 'timeout',
util.DEFAULT_TIMEOUT_DURATION)
try:
return float(timeout)
except (ValueError, TypeError):
# XXX -- this is here because sometimes people will have methods
# called 'timeout', or set timeout to 'orange', or something
# Particularly, test_news.NewsTestCase and ReactorCoreTestCase
# both do this.
warnings.warn("'timeout' attribute needs to be a number.",
category=DeprecationWarning)
return util.DEFAULT_TIMEOUT_DURATION
def _wait(self, d, running=_wait_is_running):
"""Take a Deferred that only ever callbacks. Block until it happens.
"""
if running:
raise RuntimeError("_wait is not reentrant")
from twisted.internet import reactor
results = []
def append(any):
if results is not None:
results.append(any)
def crash(ign):
if results is not None:
reactor.crash()
crash = utils.suppressWarnings(
crash, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
def stop():
reactor.crash()
stop = utils.suppressWarnings(
stop, util.suppress(message=r'reactor\.crash cannot be used.*',
category=DeprecationWarning))
running.append(None)
try:
d.addBoth(append)
if results:
# d might have already been fired, in which case append is
# called synchronously. Avoid any reactor stuff.
return
d.addBoth(crash)
reactor.stop = stop
try:
reactor.run()
finally:
del reactor.stop
# If the reactor was crashed elsewhere due to a timeout, hopefully
# that crasher also reported an error. Just return.
# _timedOut is most likely to be set when d has fired but hasn't
# completed its callback chain (see self._run)
if results or self._timedOut: #defined in run() and _run()
return
# If the timeout didn't happen, and we didn't get a result or
# a failure, then the user probably aborted the test, so let's
# just raise KeyboardInterrupt.
# FIXME: imagine this:
# web/test/test_webclient.py:
# exc = self.assertRaises(error.Error, wait, method(url))
#
# wait() will raise KeyboardInterrupt, and assertRaises will
# swallow it. Therefore, wait() raising KeyboardInterrupt is
# insufficient to stop trial. A suggested solution is to have
# this code set a "stop trial" flag, or otherwise notify trial
# that it should really try to stop as soon as possible.
raise KeyboardInterrupt()
finally:
results = None
running.pop()
|
lgpl-3.0
|
Mellthas/quodlibet
|
quodlibet/tests/test_player_gst.py
|
2
|
9002
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
import contextlib
try:
from gi.repository import Gst
except ImportError:
Gst = None
from tests import TestCase, skipUnless, get_data_path
try:
from quodlibet.player.gstbe.util import GStreamerSink as Sink
from quodlibet.player.gstbe.util import parse_gstreamer_taglist
from quodlibet.player.gstbe.util import find_audio_sink
from quodlibet.player.gstbe.prefs import GstPlayerPreferences
except ImportError:
pass
from quodlibet.player import PlayerError
from quodlibet.util import sanitize_tags, is_flatpak, matches_flatpak_runtime
from quodlibet.formats import MusicFile
from quodlibet import config
@contextlib.contextmanager
def ignore_gst_errors():
old = Gst.debug_get_default_threshold()
Gst.debug_set_default_threshold(Gst.DebugLevel.NONE)
yield
Gst.debug_set_default_threshold(old)
@skipUnless(Gst, "GStreamer missing")
class TGstPlayerPrefs(TestCase):
def setUp(self):
config.init()
def tearDown(self):
config.quit()
def test_main(self):
widget = GstPlayerPreferences(None, True)
widget.destroy()
@skipUnless(Gst, "GStreamer missing")
class TGStreamerSink(TestCase):
def test_simple(self):
sinks = ["gconfaudiosink", "alsasink"]
for n in filter(Gst.ElementFactory.find, sinks):
obj, name = Sink(n)
self.failUnless(obj)
self.failUnlessEqual(name, n)
def test_invalid(self):
with ignore_gst_errors():
self.assertRaises(PlayerError, Sink, "notarealsink")
def test_fallback(self):
obj, name = Sink("")
self.failUnless(obj)
if os.name == "nt":
self.failUnlessEqual(name, "directsoundsink")
else:
self.failUnlessEqual(name, find_audio_sink()[1])
def test_append_sink(self):
obj, name = Sink("volume")
self.failUnless(obj)
self.failUnlessEqual(name.split("!")[-1].strip(), Sink("")[1])
@skipUnless(Gst, "GStreamer missing")
class TGstreamerTagList(TestCase):
def test_parse(self):
# gst.TagList can't be filled using pyGtk... so use a dict instead
l = {}
l["extended-comment"] = u"foo=bar"
self.failUnless("foo" in parse_gstreamer_taglist(l))
l["extended-comment"] = [u"foo=bar", u"bar=foo", u"bar=foo2"]
self.failUnless("foo" in parse_gstreamer_taglist(l))
self.failUnless("bar" in parse_gstreamer_taglist(l))
self.failUnlessEqual(parse_gstreamer_taglist(l)["bar"], "foo\nfoo2")
# date is abstract, so define our own
# (might work with pygobject now)
class Foo(object):
def to_iso8601_string(self):
return "3000-10-2"
l["date"] = Foo()
date = Gst.DateTime
Gst.DateTime = Foo
self.failUnlessEqual(parse_gstreamer_taglist(l)["date"], "3000-10-2")
Gst.DateTime = date
l["foo"] = u"äöü"
parsed = parse_gstreamer_taglist(l)
self.assertTrue(isinstance(parsed["foo"], str))
self.assertTrue(u"äöü" in parsed["foo"].split("\n"))
l["foo"] = u"äöü".encode("utf-8")
parsed = parse_gstreamer_taglist(l)
self.assertTrue(isinstance(parsed["foo"], str))
self.assertTrue(u"äöü" in parsed["foo"].split("\n"))
l["bar"] = 1.2
self.failUnlessEqual(parse_gstreamer_taglist(l)["bar"], 1.2)
l["bar"] = 9
self.failUnlessEqual(parse_gstreamer_taglist(l)["bar"], 9)
l["bar"] = Gst.TagList() # some random gst instance
self.failUnless(
isinstance(parse_gstreamer_taglist(l)["bar"], str))
self.failUnless("GstTagList" in parse_gstreamer_taglist(l)["bar"])
def test_sanitize(self):
l = sanitize_tags({"location": u"http://foo"})
self.failUnless("website" in l)
l = sanitize_tags({"channel-mode": u"joint-stereo"})
self.failUnlessEqual(l["channel-mode"], "stereo")
l = sanitize_tags({"channel-mode": u"dual"})
self.failUnlessEqual(l["channel-mode"], "stereo")
l = sanitize_tags({"audio-codec": u"mp3"})
self.failUnlessEqual(l["audio-codec"], "MP3")
l = sanitize_tags({"audio-codec": u"Advanced Audio Coding"})
self.failUnlessEqual(l["audio-codec"], "MPEG-4 AAC")
l = sanitize_tags({"audio-codec": u"vorbis"})
self.failUnlessEqual(l["audio-codec"], "Ogg Vorbis")
l = {"a": u"http://www.shoutcast.com", "b": u"default genre"}
l = sanitize_tags(l)
self.failIf(l)
l = sanitize_tags({"duration": 1000 * 42}, stream=True)
self.failUnlessEqual(l["~#length"], 42)
l = sanitize_tags({"duration": 1000 * 42})
self.failIf(l)
l = sanitize_tags({"duration": u"bla"}, stream=True)
self.failUnlessEqual(l["duration"], u"bla")
l = sanitize_tags({"bitrate": 1000 * 42}, stream=True)
self.failUnlessEqual(l["~#bitrate"], 42)
l = sanitize_tags({"bitrate": 1000 * 42})
self.failIf(l)
l = sanitize_tags({"bitrate": u"bla"})
self.failUnlessEqual(l["bitrate"], u"bla")
l = sanitize_tags({"nominal-bitrate": 1000 * 42})
self.failUnlessEqual(l["~#bitrate"], 42)
l = sanitize_tags({"nominal-bitrate": 1000 * 42}, stream=True)
self.failIf(l)
l = sanitize_tags({"nominal-bitrate": u"bla"})
self.failUnlessEqual(l["nominal-bitrate"], u"bla")
l = {"emphasis": u"something"}
self.failIf(sanitize_tags(l))
self.failIf(sanitize_tags(l))
l = {"title": u"something"}
self.failIf(sanitize_tags(l))
self.failUnless(sanitize_tags(l, stream=True))
l = {"artist": u"something"}
self.failIf(sanitize_tags(l))
self.failUnless(sanitize_tags(l, stream=True))
l = {"~#foo": 42, "bar": 42, "~#bla": u"42"}
self.failUnless("~#foo" in sanitize_tags(l))
self.failUnless("~#bar" in sanitize_tags(l))
self.failUnless("bla" in sanitize_tags(l))
l = {}
l["extended-comment"] = [u"location=1", u"website=2", u"website=3"]
l = parse_gstreamer_taglist(l)
l = sanitize_tags(l)["website"].split("\n")
self.failUnless("1" in l)
self.failUnless("2" in l)
self.failUnless("3" in l)
@skipUnless(Gst, "GStreamer missing")
@skipUnless(sys.platform == "darwin" or os.name == "nt" or is_flatpak(),
"no control over gst")
class TGStreamerCodecs(TestCase):
def setUp(self):
config.init()
def tearDown(self):
config.quit()
def _check(self, song):
old_threshold = Gst.debug_get_default_threshold()
Gst.debug_set_default_threshold(Gst.DebugLevel.NONE)
pipeline = Gst.parse_launch(
"uridecodebin uri=%s ! fakesink" % song("~uri"))
bus = pipeline.get_bus()
pipeline.set_state(Gst.State.PLAYING)
error = None
try:
while 1:
message = bus.timed_pop(Gst.SECOND * 40)
if not message or message.type == Gst.MessageType.ERROR:
if message:
error = message.parse_error()[0].message
else:
error = "timed out"
break
if message.type == Gst.MessageType.EOS:
break
finally:
pipeline.set_state(Gst.State.NULL)
Gst.debug_set_default_threshold(old_threshold)
return error
def test_decode_all(self):
"""Decode all kinds of formats using Gstreamer, to check if
they all work and to notify us if a plugin is missing on
platforms where we control the packaging.
"""
files = [
"coverart.wv",
"empty.aac",
"empty.flac",
"empty.ogg",
"empty.opus",
"silence-44-s.mpc",
"silence-44-s.sv8.mpc",
"silence-44-s.tta",
# "test.mid",
"test.spc",
"test.vgm",
"test.wma",
"empty.xm",
"h264_aac.mp4",
"h265_aac.mp4"
]
if not matches_flatpak_runtime("*org.gnome.*/3.32"):
# https://gitlab.com/freedesktop-sdk/freedesktop-sdk/issues/809
files.append("silence-44-s.spx")
errors = []
for file_ in files:
path = get_data_path(file_)
song = MusicFile(path)
if song is not None:
error = self._check(song)
if error:
errors.append((song("~format"), error))
if errors:
raise Exception("Decoding failed %r" % errors)
|
gpl-2.0
|
golding/Josefin
|
python/byteport/integration_tests.py
|
2
|
15865
|
# -*- coding: utf-8 -*-
import unittest
from http_clients import *
'''
NOTE: All tests here need a Byteport instance to communicate with
'''
class TestHttpClients(unittest.TestCase):
PRODUCTION = ('api.byteport.se', 'd8a26587463268f88fea6aec')
ACCEPTANCE = ('acc.byteport.se', 'd74f48f8375a32ca632fa49a')
LOCALHOST = ('localhost:8000', 'TEST')
TEST_ENVIRONMENT = LOCALHOST
byteport_api_hostname = TEST_ENVIRONMENT[0]
key = TEST_ENVIRONMENT[1]
namespace = 'test'
device_uid = 'byteport-api-tests'
test_user = 'admin'
test_password = 'admin'
def test_should_store_string_to_single_field_name_using_GET_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
data = {'string': 'hello string'}
# Will raise exception upon errors
client.store(data)
def test_should_store_data_series_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
for v in range(0, 10):
data = {'ramp': float(v)+0.0001}
client.store(data)
time.sleep(0.2)
def test_should_store_utf8_convertibel_string_to_single_field_name_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
# Unicode string that can be converted to UTF-8
data = {'unicode_string': u'mötley crüe'}
client.store(data)
def test_should_not_store_non_utf8_convertible_string_to_single_field_name_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
# A sting that can not be encoded to UTF-8: exception should be thrown client side
data = {'unicode_string': '\x80'}
self.assertRaises(ByteportClientInvalidDataTypeException, client.store, data)
def test_should_store_number_to_single_field_name_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
data = {'number': 1337}
# Will raise exception upon errors
client.store(data)
def test_should_store_number_to_single_field_name_with_custom_high_prec_timestamp_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
data = {'number': 1338}
# Will raise exception upon errors
custom_timestamp = datetime.datetime.strptime('2015-05-01T00:00:00.012345', '%Y-%m-%dT%H:%M:%S.%f')
client.store(data, timestamp=custom_timestamp)
def test_should_log_info_using_GET_client(self):
client = ByteportHttpGetClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
# Will raise exception upon errors
client.log('info from integration tests using GET API. Lets repete this boring message just to get a shit load of text so it wont be truncated anywhere along the way: info from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET APIinfo from integration tests using GET API', 'info')
def test_should_log_info_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
# Will raise exception upon errors
client.log('info from integration tests using POST API', 'info')
def test_should_store_string_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
data = {'fukt': 20}
# Will raise exception upon errors
client.store(data)
def test_should_store_text_data_base64_encoded_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'string_b64'
data_block = 'hello world'
# Will raise exception upon errors
client.base64_encode_and_store(field_name, data_block)
def test_should_store_binary_data_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'bin_b64'
binary_data = '\x10\x20\x30\x40'
# Will raise exception upon errors
client.base64_encode_and_store(field_name, binary_data)
def test_should_compress_and_store_binary_data_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'bin_gzip_b64'
binary_data = '\x10\x20\x30\x40'
# Will raise exception upon errors
client.base64_encode_and_store(field_name, binary_data, compression='gzip')
def test_should_store_10K_binary_data_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'large_bin_b64'
binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
data_buffer = bytearray()
# Make a 10K buffer
for i in range(0, 1000):
data_buffer.extend(binary_data_base)
# Will raise exception upon errors
client.base64_encode_and_store(field_name, bytes(data_buffer))
def test_should_store_10K_binary_data_and_gzip_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'large_bin_gzip_b64'
binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
data_buffer = bytearray()
# Make a 10K buffer
for i in range(0, 1000):
data_buffer.extend(binary_data_base)
# Will raise exception upon errors
client.base64_encode_and_store(field_name, bytes(data_buffer), compression='gzip')
def test_should_store_10K_binary_data_and_bzip2_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'large_bin_bzip2_b64'
binary_data_base = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90'
data_buffer = bytearray()
# Make a 10K buffer
for i in range(0, 1000):
data_buffer.extend(binary_data_base)
# Will raise exception upon errors
client.base64_encode_and_store(field_name, bytes(data_buffer), compression='bzip2')
def test_should_store_test_file_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'file_integer_raw'
# Will raise exception upon errors
client.store_file(field_name, './integer.txt')
def test_should_store_test_file_and_bzip2_to_single_field_name_using_POST_client(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid
)
field_name = 'file_bzip2_b64'
# Will raise exception upon errors
client.base64_encode_and_store_file(field_name, './test_file_for_integration_tests.txt', compression='bzip2')
def test_should_store_directory(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid,
initial_heartbeat=False
)
client.store_directory('./test_directory', 'dir_storing_test')
def test_should_login_with_correct_credentials(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname
)
client.login(self.test_user, self.test_password)
def test_should_not_login_with_invalid_credentials(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid,
initial_heartbeat=False
)
try:
client.login('fakeuser', 'f00passb4r')
except ByteportLoginFailedException:
return
raise Exception("ByteportLoginFailedException was NOT thrown during invalid login!")
def test_should_login_and_access_protected_resource(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname
)
client.login(self.test_user, self.test_password)
# List Namespaces
result = client.list_namespaces()
self.assertTrue(len(result) > 0)
# Query for matching devices
result = client.query_devices('test', full=False, limit=10)
self.assertTrue(len(result) > 0)
# Load one device
result = client.get_device('test', '6000')
self.assertEqual(result[0]['guid'], 'test.6000')
# List devices in one namespace, load both as list and full
result = client.list_devices('test', full=False)
for guid in result:
self.assertTrue(len(guid) > 0)
result = client.list_devices('test', full=True)
for device in result:
self.assertTrue(len(device['device_type']) > 0)
#Devices
result = client.get_devices('test')
self.assertTrue( len(result) > 0 )
result = client.get_devices('test', "FOOBAR.")
self.assertTrue( len(result) == 0, "Should not find any device with id 636744, found: %s" % len(result) )
result = client.get_devices('test', "TestGW.")
self.assertTrue( len(result) == 1, "Should only find one device with uid=TestGW., found %s" % len(result) )
self.assertTrue( result[0][u'uid'] == u'TestGW', 'Device with id 1 should be the test GW, but was: "%s"' % result[0][u'uid'])
#Devicetypes
result = client.get_device_types('test')
self.assertTrue( len(result) > 0 )
result = client.get_device_types('test', "636744")
self.assertTrue( len(result) == 0, "Should not find any devicetype with id 636744, found: %s" % len(result) )
result = client.get_device_types('test', "1")
self.assertTrue( len(result) == 1, "Should only find one devicetype with id=1, found %s" % len(result) )
self.assertTrue( result[0][u'name'] == u'Generic Test Gateway', 'Device with id 1 should be the test GW, but was: "%s"' % result[0][u'name'])
#device firmwares
result = client.get_firmwares('test', device_type_id='1')
self.assertTrue( len(result) > 0 )
result = client.get_firmwares('test', device_type_id="1", key="636744")
self.assertTrue( len(result) == 0, "Should not find any firmware with id 636744, found: %s" % len(result) )
result = client.get_firmwares('test', device_type_id="1", key="2")
self.assertTrue( len(result) == 1, "Should only find one device with id=1, found %s" % len(result) )
self.assertTrue( result[0][u'filesize'] == u'165613', 'Device fw with id 2 should have size 165613, but was: "%s"' % result[0][u'filesize'])
#device field-definitions
result = client.get_field_definitions('test', device_type_id='2')
self.assertTrue( len(result) > 0 )
result = client.get_field_definitions('test', device_type_id="2", key="636744")
self.assertTrue( len(result) == 0, "Should not find any field definition with id 636744, found: %s" % len(result) )
result = client.get_field_definitions('test', device_type_id="2", key="5")
self.assertTrue( len(result) == 1, "Should only find one field definition with id=1, found %s" % len(result) )
self.assertTrue( result[0][u'name'] == u'b64_jsons', 'Device field 5 of test gw should be "b64_jsons", but was: "%s"' % result[0][u'name'])
# Load time-series data
to_time = datetime.datetime.now()
from_time = to_time - datetime.timedelta(hours=1)
result = client.load_timeseries_data('test', '6000', 'temp', from_time, to_time)
self.assertEqual(result['meta']['path'], u'test.6000.temp')
class PollingTests(unittest.TestCase):
#hostname = 'localhost:8000'
#hostname = 'acc.byteport.se'
hostname = 'api.byteport.se'
byteport_api_hostname = 'http://%s/services/store/' % hostname
namespace = 'test'
device_uid = 'byteport-api-tests'
key = 'd8a26587463268f88fea6aec'
#key = 'TEST'
def test_should_poll_directory_for_changes___needs_manual_change_to_trigger(self):
client = ByteportHttpClient(
byteport_api_hostname=self.byteport_api_hostname,
namespace_name=self.namespace,
api_key=self.key,
default_device_uid=self.device_uid,
initial_heartbeat=False
)
client.poll_directory_and_store_upon_content_change('./test_directory/', 'dir_poller_test')
class TestStompClient(unittest.TestCase):
TEST_BROKERS = ['canopus']
test_device_uid = '6000'
def test_should_connect_and_send_one_message_using_stomp_client(self):
client = ByteportStompClient('test', 'publicTestUser', 'publicTestUser', broker_hosts=self.TEST_BROKERS)
client.store({'stomp_data': 'hello STOMP world!'}, self.test_device_uid)
|
bsd-2-clause
|
errx/django
|
django/core/management/commands/syncdb.py
|
11
|
1145
|
import warnings
from optparse import make_option
from django.db import DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
from django.utils.deprecation import RemovedInDjango19Warning
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Deprecated - use 'migrate' instead."
def handle_noargs(self, **options):
warnings.warn("The syncdb command will be removed in Django 1.9", RemovedInDjango19Warning)
call_command("migrate", **options)
|
bsd-3-clause
|
CERNDocumentServer/cds-videos
|
tests/unit/test_keyword.py
|
3
|
3414
|
# -*- coding: utf-8 -*-
#
# This file is part of CDS.
# Copyright (C) 2017 CERN.
#
# CDS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CDS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CDS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test cds keyword."""
from __future__ import absolute_import, print_function
import json
from flask import url_for
from time import sleep
def test_load_jsonschema_category(api_app, json_headers):
"""Load jsonschema for keyword."""
with api_app.test_client() as client:
res = client.get(
url_for('invenio_jsonschemas.get_schema',
schema_path='keywords/keyword-v1.0.0.json'),
headers=json_headers)
assert res.status_code == 200
def test_get_keyword_from_url(api_app, db, es, indexer, pidstore,
cds_jsonresolver, json_headers, keyword_1,
keyword_3_deleted):
"""Load jsonschema for keyword."""
sleep(3)
with api_app.test_request_context():
url = url_for('invenio_records_rest.kwid_list')
with api_app.test_client() as client:
res = client.get(url, headers=json_headers)
assert res.status_code == 200
data = json.loads(res.data.decode('utf-8'))
assert len(data['hits']['hits']) == 1
keyw = data['hits']['hits'][0]
assert keyw['metadata'] == keyword_1
def test_suggest_keyword_from_url(api_app, db, es, indexer, pidstore,
cds_jsonresolver, json_headers, keyword_1,
keyword_2, keyword_3_deleted):
"""Load jsonschema for keyword."""
sleep(3)
with api_app.test_request_context():
url = url_for('invenio_records_rest.kwid_suggest')
with api_app.test_client() as client:
# suggest 1
res = client.get(
url,
headers=json_headers,
query_string={'suggest-name': keyword_2['name'][0:3], 'size': 10}
)
assert res.status_code == 200
data = json.loads(res.data.decode('utf-8'))
assert len(data['suggest-name'][0]['options']) == 1
name = data['suggest-name'][0]['options'][0]['payload']['name']
assert name == keyword_2['name']
key = data['suggest-name'][0]['options'][0]['payload']['key_id']
assert key in keyword_2['key_id']
# suggest 2
res = client.get(
url,
headers=json_headers,
query_string={'suggest-name': 'no-exist', 'size': 10}
)
assert res.status_code == 200
data = json.loads(res.data.decode('utf-8'))
assert len(data['suggest-name'][0]['options']) == 0
|
gpl-2.0
|
wikimedia/user_metrics
|
user_metrics/api/run.py
|
1
|
4196
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module defines the entry point for flask_ web server implementation
of the Wikimedia User Metrics API. This module is consumable
by the Apache web server via WSGI interface via mod_wsgi. An Apache
server can be pointed to api.wsgi such that Apache may be used as a
wrapper in this way.
.. _flask: http://flask.pocoo.org
Cohort Data
^^^^^^^^^^^
Cohort data is maintained in the host s1-analytics-slave.eqiad.wmnet under
the `staging` database in the `usertags` and `usertags_meta` tables: ::
+---------+-----------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+---------+-----------------+------+-----+---------+-------+
| ut_user | int(5) unsigned | NO | PRI | NULL | |
| ut_tag | int(4) unsigned | NO | PRI | NULL | |
+---------+-----------------+------+-----+---------+-------+
+-------------+-----------------+------+-----+---------+
| Field | Type | Null | Key | Default |
+-------------+-----------------+------+-----+---------+
| utm_id | int(5) unsigned | NO | PRI | NULL |
| utm_name | varchar(255) | NO | | |
| utm_notes | varchar(255) | YES | | NULL |
| utm_touched | datetime | YES | | NULL |
+-------------+-----------------+------+-----+---------+
"""
__author__ = {
"dario taraborelli": "[email protected]",
"ryan faulkner": "[email protected]"
}
__date__ = "2012-12-21"
__license__ = "GPL (version 2 or later)"
import multiprocessing as mp
from user_metrics.config import logging, settings
from user_metrics.api.engine.request_manager import job_control, \
requests_notification_callback
from user_metrics.api.engine.response_handler import process_responses
from user_metrics.api.views import app
from user_metrics.api.engine.request_manager import api_request_queue, \
req_notification_queue_out, req_notification_queue_in, api_response_queue
from user_metrics.utils import terminate_process_with_checks
job_controller_proc = None
response_controller_proc = None
rm_callback_proc = None
######
#
# Define Custom Classes
#
#######
def teardown():
""" When the instance is deleted store the pickled data and shutdown
the job controller """
# Try to shutdown the job control proc gracefully
try:
terminate_process_with_checks(job_controller_proc)
terminate_process_with_checks(response_controller_proc)
terminate_process_with_checks(rm_callback_proc)
except Exception:
logging.error(__name__ + ' :: Could not shut down callbacks.')
def setup_controller(req_queue, res_queue, msg_queue_in, msg_queue_out):
"""
Sets up the process that handles API jobs
"""
job_controller_proc = mp.Process(target=job_control,
args=(req_queue, res_queue))
response_controller_proc = mp.Process(target=process_responses,
args=(res_queue,
msg_queue_in))
rm_callback_proc = mp.Process(target=requests_notification_callback,
args=(msg_queue_in,
msg_queue_out))
job_controller_proc.start()
response_controller_proc.start()
rm_callback_proc.start()
######
#
# Execution
#
#######
# initialize API data - get the instance
setup_controller(api_request_queue, api_response_queue,
req_notification_queue_in, req_notification_queue_out)
app.config['SECRET_KEY'] = settings.__secret_key__
# If the flask.ext.login module is present, set up the login manager
if settings.__flask_login_exists__:
from user_metrics.api.session import login_manager
login_manager.setup_app(app)
if __name__ == '__main__':
try:
app.run(debug=True,
use_reloader=False,
host=settings.__instance_host__,
port=settings.__instance_port__,)
finally:
teardown()
|
bsd-3-clause
|
FrankBian/kuma
|
vendor/packages/ipython/IPython/frontend/wx/console_widget.py
|
7
|
23141
|
# encoding: utf-8
"""
A Wx widget to act as a console and input commands.
This widget deals with prompts and provides an edit buffer
restricted to after the last prompt.
"""
__docformat__ = "restructuredtext en"
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is
# in the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import wx
import wx.stc as stc
from wx.py import editwindow
import time
import sys
import string
LINESEP = '\n'
if sys.platform == 'win32':
LINESEP = '\n\r'
import re
# FIXME: Need to provide an API for non user-generated display on the
# screen: this should not be editable by the user.
#-------------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------------
_COMPLETE_BUFFER_MARKER = 31
_ERROR_MARKER = 30
_INPUT_MARKER = 29
_DEFAULT_SIZE = 10
if sys.platform == 'darwin':
_DEFAULT_SIZE = 12
_DEFAULT_STYLE = {
#background definition
'default' : 'size:%d' % _DEFAULT_SIZE,
'bracegood' : 'fore:#00AA00,back:#000000,bold',
'bracebad' : 'fore:#FF0000,back:#000000,bold',
# Edge column: a number of None
'edge_column' : -1,
# properties for the various Python lexer styles
'comment' : 'fore:#007F00',
'number' : 'fore:#007F7F',
'string' : 'fore:#7F007F,italic',
'char' : 'fore:#7F007F,italic',
'keyword' : 'fore:#00007F,bold',
'triple' : 'fore:#7F0000',
'tripledouble' : 'fore:#7F0000',
'class' : 'fore:#0000FF,bold,underline',
'def' : 'fore:#007F7F,bold',
'operator' : 'bold',
# Default colors
'trace' : '#FAFAF1', # Nice green
'stdout' : '#FDFFD3', # Nice yellow
'stderr' : '#FFF1F1', # Nice red
# Default scintilla settings
'antialiasing' : True,
'carret_color' : 'BLACK',
'background_color' :'WHITE',
#prompt definition
'prompt_in1' : \
'\n\x01\x1b[0;34m\x02In [\x01\x1b[1;34m\x02$number\x01\x1b[0;34m\x02]: \x01\x1b[0m\x02',
'prompt_out': \
'\x01\x1b[0;31m\x02Out[\x01\x1b[1;31m\x02$number\x01\x1b[0;31m\x02]: \x01\x1b[0m\x02',
}
# new style numbers
_STDOUT_STYLE = 15
_STDERR_STYLE = 16
_TRACE_STYLE = 17
# system colors
#SYS_COLOUR_BACKGROUND = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BACKGROUND)
# Translation table from ANSI escape sequences to color.
ANSI_STYLES = {'0;30': [0, 'BLACK'], '0;31': [1, 'RED'],
'0;32': [2, 'GREEN'], '0;33': [3, 'BROWN'],
'0;34': [4, 'BLUE'], '0;35': [5, 'PURPLE'],
'0;36': [6, 'CYAN'], '0;37': [7, 'LIGHT GREY'],
'1;30': [8, 'DARK GREY'], '1;31': [9, 'RED'],
'1;32': [10, 'SEA GREEN'], '1;33': [11, 'YELLOW'],
'1;34': [12, 'LIGHT BLUE'], '1;35':
[13, 'MEDIUM VIOLET RED'],
'1;36': [14, 'LIGHT STEEL BLUE'], '1;37': [15, 'YELLOW']}
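# For example, the escape sequence '\x01\x1b[0;31m\x02' used by the
# 'prompt_out' entry of _DEFAULT_STYLE above selects the '0;31' entry of this
# table, i.e. style number 1 (labelled 'RED'); the surrounding '\x01'/'\x02'
# bytes merely bracket the non-printing characters and are consumed by
# color_pat when the text is split in write() below.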
# XXX: Maybe one day we should factor this code with ColorANSI. Right now
# ColorANSI is hard to reuse and makes our code more complex.
#we define platform specific fonts
if wx.Platform == '__WXMSW__':
FACES = { 'times': 'Times New Roman',
'mono' : 'Courier New',
'helv' : 'Arial',
'other': 'Comic Sans MS',
'size' : 10,
'size2': 8,
}
elif wx.Platform == '__WXMAC__':
FACES = { 'times': 'Times New Roman',
'mono' : 'Monaco',
'helv' : 'Arial',
'other': 'Comic Sans MS',
'size' : 10,
'size2': 8,
}
else:
FACES = { 'times': 'Times',
'mono' : 'Courier',
'helv' : 'Helvetica',
'other': 'new century schoolbook',
'size' : 10,
'size2': 8,
}
#-------------------------------------------------------------------------------
# The console widget class
#-------------------------------------------------------------------------------
class ConsoleWidget(editwindow.EditWindow):
""" Specialized styled text control view for console-like workflow.
This widget is mainly interested in dealing with the prompt and
keeping the cursor inside the editing line.
"""
# This is where the title captured from the ANSI escape sequences are
# stored.
title = 'Console'
# Last prompt printed
last_prompt = ''
# The buffer being edited.
def _set_input_buffer(self, string):
self.SetSelection(self.current_prompt_pos, self.GetLength())
self.ReplaceSelection(string)
self.GotoPos(self.GetLength())
def _get_input_buffer(self):
""" Returns the text in current edit buffer.
"""
input_buffer = self.GetTextRange(self.current_prompt_pos,
self.GetLength())
input_buffer = input_buffer.replace(LINESEP, '\n')
return input_buffer
input_buffer = property(_get_input_buffer, _set_input_buffer)
style = _DEFAULT_STYLE.copy()
# Translation table from ANSI escape sequences to color. Override
# this to specify your colors.
ANSI_STYLES = ANSI_STYLES.copy()
# Font faces
faces = FACES.copy()
# Store the last time a refresh was done
_last_refresh_time = 0
#--------------------------------------------------------------------------
# Public API
#--------------------------------------------------------------------------
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.WANTS_CHARS, ):
editwindow.EditWindow.__init__(self, parent, id, pos, size, style)
self.configure_scintilla()
# Track if 'enter' key as ever been processed
# This variable will only be reallowed until key goes up
self.enter_catched = False
self.current_prompt_pos = 0
self.Bind(wx.EVT_KEY_DOWN, self._on_key_down)
self.Bind(wx.EVT_KEY_UP, self._on_key_up)
def write(self, text, refresh=True):
""" Write given text to buffer, while translating the ansi escape
sequences.
"""
        # XXX: do not put print statements to sys.stdout/sys.stderr in
        # this method, as the print statements will call this method and
        # you will end up with an infinite loop
title = self.title_pat.split(text)
if len(title)>1:
self.title = title[-2]
text = self.title_pat.sub('', text)
segments = self.color_pat.split(text)
segment = segments.pop(0)
self.GotoPos(self.GetLength())
self.StartStyling(self.GetLength(), 0xFF)
try:
self.AppendText(segment)
except UnicodeDecodeError:
# XXX: Do I really want to skip the exception?
pass
if segments:
for ansi_tag, text in zip(segments[::2], segments[1::2]):
self.StartStyling(self.GetLength(), 0xFF)
try:
self.AppendText(text)
except UnicodeDecodeError:
# XXX: Do I really want to skip the exception?
pass
if ansi_tag not in self.ANSI_STYLES:
style = 0
else:
style = self.ANSI_STYLES[ansi_tag][0]
self.SetStyling(len(text), style)
self.GotoPos(self.GetLength())
if refresh:
current_time = time.time()
if current_time - self._last_refresh_time > 0.03:
if sys.platform == 'win32':
wx.SafeYield()
else:
wx.Yield()
# self.ProcessEvent(wx.PaintEvent())
self._last_refresh_time = current_time
def new_prompt(self, prompt):
""" Prints a prompt at start of line, and move the start of the
current block there.
The prompt can be given with ascii escape sequences.
"""
self.write(prompt, refresh=False)
# now we update our cursor giving end of prompt
self.current_prompt_pos = self.GetLength()
self.current_prompt_line = self.GetCurrentLine()
self.EnsureCaretVisible()
self.last_prompt = prompt
def continuation_prompt(self):
""" Returns the current continuation prompt.
We need to implement this method here to deal with the
            ANSI escape sequence clean-up.
"""
# ASCII-less prompt
ascii_less = ''.join(self.color_pat.split(self.last_prompt)[2::2])
return "."*(len(ascii_less)-2) + ': '
def scroll_to_bottom(self):
maxrange = self.GetScrollRange(wx.VERTICAL)
self.ScrollLines(maxrange)
def pop_completion(self, possibilities, offset=0):
""" Pops up an autocompletion menu. Offset is the offset
in characters of the position at which the menu should
            appear, relative to the cursor.
"""
self.AutoCompSetIgnoreCase(False)
self.AutoCompSetAutoHide(False)
self.AutoCompSetMaxHeight(len(possibilities))
self.AutoCompShow(offset, " ".join(possibilities))
def get_line_width(self):
""" Return the width of the line in characters.
"""
return self.GetSize()[0]/self.GetCharWidth()
def configure_scintilla(self):
""" Set up all the styling option of the embedded scintilla
widget.
"""
p = self.style.copy()
# Marker for complete buffer.
self.MarkerDefine(_COMPLETE_BUFFER_MARKER, stc.STC_MARK_BACKGROUND,
background=p['trace'])
# Marker for current input buffer.
self.MarkerDefine(_INPUT_MARKER, stc.STC_MARK_BACKGROUND,
background=p['stdout'])
# Marker for tracebacks.
self.MarkerDefine(_ERROR_MARKER, stc.STC_MARK_BACKGROUND,
background=p['stderr'])
self.SetEOLMode(stc.STC_EOL_LF)
# Ctrl"+" or Ctrl "-" can be used to zoomin/zoomout the text inside
# the widget
self.CmdKeyAssign(ord('+'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMIN)
self.CmdKeyAssign(ord('-'), stc.STC_SCMOD_CTRL, stc.STC_CMD_ZOOMOUT)
# Also allow Ctrl Shift "=" for poor non US keyboard users.
self.CmdKeyAssign(ord('='), stc.STC_SCMOD_CTRL|stc.STC_SCMOD_SHIFT,
stc.STC_CMD_ZOOMIN)
        # Keys: we need to clear some of the keys that don't play
# well with a console.
self.CmdKeyClear(ord('D'), stc.STC_SCMOD_CTRL)
self.CmdKeyClear(ord('L'), stc.STC_SCMOD_CTRL)
self.CmdKeyClear(ord('T'), stc.STC_SCMOD_CTRL)
self.CmdKeyClear(ord('A'), stc.STC_SCMOD_CTRL)
self.SetEOLMode(stc.STC_EOL_CRLF)
self.SetWrapMode(stc.STC_WRAP_CHAR)
self.SetWrapMode(stc.STC_WRAP_WORD)
self.SetBufferedDraw(True)
self.SetUseAntiAliasing(p['antialiasing'])
self.SetLayoutCache(stc.STC_CACHE_PAGE)
self.SetUndoCollection(False)
self.SetUseTabs(True)
self.SetIndent(4)
self.SetTabWidth(4)
# we don't want scintilla's autocompletion to choose
        # automatically out of a single choice list, as we pop it up
        # automatically
self.AutoCompSetChooseSingle(False)
self.AutoCompSetMaxHeight(10)
# XXX: this doesn't seem to have an effect.
self.AutoCompSetFillUps('\n')
        self.SetMargins(3, 3)  # text is moved away from the border by 3px
# Suppressing Scintilla margins
self.SetMarginWidth(0, 0)
self.SetMarginWidth(1, 0)
self.SetMarginWidth(2, 0)
# Xterm escape sequences
self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
self.title_pat = re.compile('\x1b]0;(.*?)\x07')
# styles
self.SetCaretForeground(p['carret_color'])
background_color = p['background_color']
if 'default' in p:
if 'back' not in p['default']:
p['default'] += ',back:%s' % background_color
if 'size' not in p['default']:
p['default'] += ',size:%s' % self.faces['size']
if 'face' not in p['default']:
p['default'] += ',face:%s' % self.faces['mono']
self.StyleSetSpec(stc.STC_STYLE_DEFAULT, p['default'])
else:
self.StyleSetSpec(stc.STC_STYLE_DEFAULT,
"fore:%s,back:%s,size:%d,face:%s"
% (self.ANSI_STYLES['0;30'][1],
background_color,
self.faces['size'], self.faces['mono']))
self.StyleClearAll()
        # XXX: two lines below are useful if not using the lexer
#for style in self.ANSI_STYLES.values():
# self.StyleSetSpec(style[0], "bold,fore:%s" % style[1])
# prompt definition
self.prompt_in1 = p['prompt_in1']
self.prompt_out = p['prompt_out']
self.output_prompt_template = string.Template(self.prompt_out)
self.input_prompt_template = string.Template(self.prompt_in1)
self.StyleSetSpec(_STDOUT_STYLE, p['stdout'])
self.StyleSetSpec(_STDERR_STYLE, p['stderr'])
self.StyleSetSpec(_TRACE_STYLE, p['trace'])
self.StyleSetSpec(stc.STC_STYLE_BRACELIGHT, p['bracegood'])
self.StyleSetSpec(stc.STC_STYLE_BRACEBAD, p['bracebad'])
self.StyleSetSpec(stc.STC_P_COMMENTLINE, p['comment'])
self.StyleSetSpec(stc.STC_P_NUMBER, p['number'])
self.StyleSetSpec(stc.STC_P_STRING, p['string'])
self.StyleSetSpec(stc.STC_P_CHARACTER, p['char'])
self.StyleSetSpec(stc.STC_P_WORD, p['keyword'])
self.StyleSetSpec(stc.STC_P_WORD2, p['keyword'])
self.StyleSetSpec(stc.STC_P_TRIPLE, p['triple'])
self.StyleSetSpec(stc.STC_P_TRIPLEDOUBLE, p['tripledouble'])
self.StyleSetSpec(stc.STC_P_CLASSNAME, p['class'])
self.StyleSetSpec(stc.STC_P_DEFNAME, p['def'])
self.StyleSetSpec(stc.STC_P_OPERATOR, p['operator'])
self.StyleSetSpec(stc.STC_P_COMMENTBLOCK, p['comment'])
edge_column = p['edge_column']
if edge_column is not None and edge_column > 0:
#we add a vertical line to console widget
self.SetEdgeMode(stc.STC_EDGE_LINE)
self.SetEdgeColumn(edge_column)
#--------------------------------------------------------------------------
# EditWindow API
#--------------------------------------------------------------------------
def OnUpdateUI(self, event):
""" Override the OnUpdateUI of the EditWindow class, to prevent
syntax highlighting both for faster redraw, and for more
consistent look and feel.
"""
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _on_key_down(self, event, skip=True):
""" Key press callback used for correcting behavior for
            console-like interfaces: the cursor is constrained to be after
the last prompt.
            Return True if the event has been caught.
"""
catched = True
# XXX: Would the right way to do this be to have a
# dictionary at the instance level associating keys with
        # callbacks? How would we deal with inheritance? And do the
# different callbacks share local variables?
# Intercept some specific keys.
key_code = event.GetKeyCode()
if key_code == ord('L') and event.ControlDown() :
self.scroll_to_bottom()
elif key_code == ord('K') and event.ControlDown() :
self.input_buffer = ''
elif key_code == ord('A') and event.ControlDown() :
self.GotoPos(self.GetLength())
self.SetSelectionStart(self.current_prompt_pos)
self.SetSelectionEnd(self.GetCurrentPos())
catched = True
elif key_code == ord('E') and event.ControlDown() :
self.GotoPos(self.GetLength())
catched = True
elif key_code == wx.WXK_PAGEUP:
self.ScrollPages(-1)
elif key_code == wx.WXK_PAGEDOWN:
self.ScrollPages(1)
elif key_code == wx.WXK_HOME:
self.GotoPos(self.GetLength())
elif key_code == wx.WXK_END:
self.GotoPos(self.GetLength())
elif key_code == wx.WXK_UP and event.ShiftDown():
self.ScrollLines(-1)
elif key_code == wx.WXK_DOWN and event.ShiftDown():
self.ScrollLines(1)
else:
catched = False
if self.AutoCompActive():
event.Skip()
else:
if key_code in (13, wx.WXK_NUMPAD_ENTER):
# XXX: not catching modifiers, to be wx2.6-compatible
catched = True
if not self.enter_catched:
self.CallTipCancel()
if event.ShiftDown():
# Try to force execution
self.GotoPos(self.GetLength())
self.write('\n' + self.continuation_prompt(),
refresh=False)
self._on_enter()
else:
self._on_enter()
self.enter_catched = True
elif key_code == wx.WXK_HOME:
if not event.ShiftDown():
self.GotoPos(self.current_prompt_pos)
catched = True
else:
# FIXME: This behavior is not ideal: if the selection
# is already started, it will jump.
self.SetSelectionStart(self.current_prompt_pos)
self.SetSelectionEnd(self.GetCurrentPos())
catched = True
elif key_code == wx.WXK_UP:
if self.GetCurrentLine() > self.current_prompt_line:
if self.GetCurrentLine() == self.current_prompt_line + 1 \
and self.GetColumn(self.GetCurrentPos()) < \
self.GetColumn(self.current_prompt_pos):
self.GotoPos(self.current_prompt_pos)
else:
event.Skip()
catched = True
elif key_code in (wx.WXK_LEFT, wx.WXK_BACK):
if not self._keep_cursor_in_buffer(self.GetCurrentPos() - 1):
event.Skip()
catched = True
elif key_code == wx.WXK_RIGHT:
if not self._keep_cursor_in_buffer(self.GetCurrentPos() + 1):
event.Skip()
catched = True
elif key_code == wx.WXK_DELETE:
if not self._keep_cursor_in_buffer(self.GetCurrentPos() - 1):
event.Skip()
catched = True
if skip and not catched:
# Put the cursor back in the edit region
if not self._keep_cursor_in_buffer():
if not (self.GetCurrentPos() == self.GetLength()
and key_code == wx.WXK_DELETE):
event.Skip()
catched = True
return catched
def _on_key_up(self, event, skip=True):
""" If cursor is outside the editing region, put it back.
"""
if skip:
event.Skip()
self._keep_cursor_in_buffer()
# XXX: I need to avoid the problem of having an empty glass;
def _keep_cursor_in_buffer(self, pos=None):
""" Checks if the cursor is where it is allowed to be. If not,
put it back.
Returns
-------
cursor_moved: Boolean
whether or not the cursor was moved by this routine.
Notes
------
WARNING: This does proper checks only for horizontal
movements.
"""
if pos is None:
current_pos = self.GetCurrentPos()
else:
current_pos = pos
if current_pos < self.current_prompt_pos:
self.GotoPos(self.current_prompt_pos)
return True
line_num = self.LineFromPosition(current_pos)
if not current_pos > self.GetLength():
line_pos = self.GetColumn(current_pos)
else:
line_pos = self.GetColumn(self.GetLength())
line = self.GetLine(line_num)
# Jump the continuation prompt
continuation_prompt = self.continuation_prompt()
if ( line.startswith(continuation_prompt)
and line_pos < len(continuation_prompt)):
if line_pos < 2:
# We are at the beginning of the line, trying to move
# forward: jump forward.
self.GotoPos(current_pos + 1 +
len(continuation_prompt) - line_pos)
else:
# Jump back up
self.GotoPos(self.GetLineEndPosition(line_num-1))
return True
elif ( current_pos > self.GetLineEndPosition(line_num)
and not current_pos == self.GetLength()):
# Jump to next line
self.GotoPos(current_pos + 1 +
len(continuation_prompt))
return True
# We re-allow enter event processing
self.enter_catched = False
return False
if __name__ == '__main__':
# Some simple code to test the console widget.
class MainWindow(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(300, 250))
self._sizer = wx.BoxSizer(wx.VERTICAL)
self.console_widget = ConsoleWidget(self)
self._sizer.Add(self.console_widget, 1, wx.EXPAND)
self.SetSizer(self._sizer)
self.SetAutoLayout(1)
self.Show(True)
app = wx.PySimpleApp()
w = MainWindow(None, wx.ID_ANY, 'ConsoleWidget')
w.SetSize((780, 460))
w.Show()
app.MainLoop()
|
mpl-2.0
|
adelina-t/neutron
|
neutron/tests/unit/agent/l3/test_legacy_router.py
|
17
|
2991
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.l3 import legacy_router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class BasicRouterTestCaseFramework(base.BaseTestCase):
def _create_router(self, router=None, **kwargs):
if not router:
router = mock.MagicMock()
self.agent_conf = mock.Mock()
self.driver = mock.Mock()
self.router_id = _uuid()
return legacy_router.LegacyRouter(self.router_id,
router,
self.agent_conf,
self.driver,
**kwargs)
class TestBasicRouterOperations(BasicRouterTestCaseFramework):
def test_remove_floating_ip(self):
ri = self._create_router(mock.MagicMock())
device = mock.Mock()
cidr = '15.1.2.3/32'
ri.remove_floating_ip(device, cidr)
device.delete_addr_and_conntrack_state.assert_called_once_with(cidr)
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework):
def test_add_floating_ip(self, send_ip_addr_adv_notif):
ri = self._create_router()
ri._add_fip_addr_to_device = mock.Mock(return_value=True)
ip = '15.1.2.3'
result = ri.add_floating_ip({'floating_ip_address': ip},
mock.sentinel.interface_name,
mock.sentinel.device)
ip_lib.send_ip_addr_adv_notif.assert_called_once_with(
ri.ns_name,
mock.sentinel.interface_name,
ip,
self.agent_conf)
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result)
def test_add_floating_ip_error(self, send_ip_addr_adv_notif):
ri = self._create_router()
ri._add_fip_addr_to_device = mock.Mock(return_value=False)
result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'},
mock.sentinel.interface_name,
mock.sentinel.device)
self.assertFalse(ip_lib.send_ip_addr_adv_notif.called)
self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result)
|
apache-2.0
|
cristiana214/cristianachavez214-cristianachavez
|
python/src/Lib/encodings/base64_codec.py
|
528
|
2338
|
""" Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, base64
### Codec APIs
def base64_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.encodestring(input)
return (output, len(input))
def base64_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.decodestring(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return base64_encode(input,errors)
def decode(self, input,errors='strict'):
return base64_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return base64.encodestring(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return base64.decodestring(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='base64',
encode=base64_encode,
decode=base64_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
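# Usage sketch (Python 2, where this codec is looked up through the encodings
# package); note that both directions return byte strings, as described in the
# module docstring:
#
#     >>> 'hello world'.encode('base64')
#     'aGVsbG8gd29ybGQ=\n'
#     >>> 'aGVsbG8gd29ybGQ=\n'.decode('base64')
#     'hello world'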
|
apache-2.0
|
ralphm/wokkel
|
wokkel/test/test_iwokkel.py
|
1
|
1676
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
Tests for L{wokkel.iwokkel}
"""
from __future__ import division, absolute_import
from twisted.trial import unittest
class DeprecationTest(unittest.TestCase):
"""
Deprecation test for L{wokkel.subprotocols}.
"""
def lookForDeprecationWarning(self, testmethod, attributeName, newName):
"""
Importing C{testmethod} emits a deprecation warning.
"""
warningsShown = self.flushWarnings([testmethod])
self.assertEqual(len(warningsShown), 1)
self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"wokkel.iwokkel." + attributeName + " "
"was deprecated in wokkel 0.7.0: Use " + newName + " instead.")
def test_iXMPPHandler(self):
"""
L{wokkel.iwokkel.IXMPPHandler} is deprecated.
"""
from wokkel.iwokkel import IXMPPHandler
IXMPPHandler
self.lookForDeprecationWarning(
self.test_iXMPPHandler,
"IXMPPHandler",
"twisted.words.protocols.jabber.ijabber."
"IXMPPHandler")
def test_iXMPPHandlerCollection(self):
"""
L{wokkel.iwokkel.IXMPPHandlerCollection} is deprecated.
"""
from wokkel.iwokkel import IXMPPHandlerCollection
IXMPPHandlerCollection
self.lookForDeprecationWarning(
self.test_iXMPPHandlerCollection,
"IXMPPHandlerCollection",
"twisted.words.protocols.jabber.ijabber."
"IXMPPHandlerCollection")
|
mit
|
benosteen/mypaint
|
gui/brushcreationwidget.py
|
1
|
9333
|
# This file is part of MyPaint.
# Copyright (C) 2009 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import gtk
gdk = gtk.gdk
from lib import document
import tileddrawwidget, brushmanager, dialogs
from gettext import gettext as _
def startfile(path):
import os
import platform
    if platform.system() == 'Windows':
os.startfile(path)
else:
os.system("xdg-open " + path)
def stock_button(stock_id):
b = gtk.Button()
img = gtk.Image()
img.set_from_stock(stock_id, gtk.ICON_SIZE_MENU)
b.add(img)
return b
class BrushManipulationWidget(gtk.HBox):
""" """
def __init__(self, app, brushicon_editor):
gtk.HBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self.brushicon_editor = brushicon_editor
self.init_widgets()
self.bm.selected_brush_observers.append(self.brush_selected_cb)
def init_widgets(self):
l = self.brush_name_label = gtk.Label()
l.set_text(_('(unnamed brush)'))
self.pack_start(l, expand=True)
right_vbox_buttons = [
(gtk.STOCK_SAVE, self.update_settings_cb, _('Save Settings')),
(gtk.STOCK_ADD, self.create_brush_cb, _('Add As New')),
(gtk.STOCK_PROPERTIES, self.edit_brush_cb, _('Edit Brush Icon')),
(gtk.STOCK_EDIT, self.rename_brush_cb, _('Rename...')),
(gtk.STOCK_DELETE, self.delete_brush_cb, _('Remove...')),
]
for stock_id, clicked_cb, tooltip in reversed(right_vbox_buttons):
b = stock_button(stock_id)
b.connect('clicked', clicked_cb)
b.set_tooltip_text(tooltip)
self.pack_end(b, expand=False)
def brush_selected_cb(self, managed_brush, brushinfo):
name = managed_brush.name
if name is None:
name = _('(unnamed brush)')
else:
name = name.replace('_', ' ') # XXX safename/unsafename utils?
self.brush_name_label.set_text(name)
def edit_brush_cb(self, window):
self.edit_brush_properties_cb()
def create_brush_cb(self, window):
"""Create and save a new brush based on the current working brush."""
b = brushmanager.ManagedBrush(self.bm)
b.brushinfo = self.app.brush.clone()
b.brushinfo.set_string_property("parent_brush_name", None) #avoid mis-hilight
b.preview = self.brushicon_editor.get_preview_pixbuf()
b.save()
if self.bm.active_groups:
group = self.bm.active_groups[0]
else:
group = brushmanager.DEFAULT_BRUSH_GROUP
brushes = self.bm.get_group_brushes(group, make_active=True)
brushes.insert(0, b)
b.persistent = True # Brush was saved
b.in_brushlist = True
for f in self.bm.brushes_observers: f(brushes)
self.bm.select_brush(b)
# Pretend that the active app.brush is a child of the new one, for the
# sake of the strokemap and strokes drawn immediately after.
self.app.brush.set_string_property("parent_brush_name", b.name)
def rename_brush_cb(self, window):
src_brush = self.bm.selected_brush
if not src_brush.name:
dialogs.error(self, _('No brush selected!'))
return
dst_name = dialogs.ask_for_name(self, _("Rename Brush"), src_brush.name.replace('_', ' '))
if not dst_name:
return
dst_name = dst_name.replace(' ', '_')
# ensure we don't overwrite an existing brush by accident
dst_deleted = None
for group, brushes in self.bm.groups.iteritems():
for b2 in brushes:
if b2.name == dst_name:
if group == brushmanager.DELETED_BRUSH_GROUP:
dst_deleted = b2
else:
dialogs.error(self, _('A brush with this name already exists!'))
return
print 'renaming brush', repr(src_brush.name), '-->', repr(dst_name)
if dst_deleted:
deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP)
deleted_brushes.remove(dst_deleted)
for f in self.bm.brushes_observers: f(deleted_brushes)
# save src as dst
src_name = src_brush.name
src_brush.name = dst_name
src_brush.save()
src_brush.name = src_name
# load dst
dst_brush = brushmanager.ManagedBrush(self.bm, dst_name, persistent=True)
dst_brush.load()
dst_brush.in_brushlist = True
# replace src with dst (but keep src in the deleted list if it is a stock brush)
self.delete_brush_internal(src_brush, replacement=dst_brush)
self.bm.select_brush(dst_brush)
def update_settings_cb(self, window):
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected, please use "Add As New" instead.'))
return
b.brushinfo = self.app.brush.clone()
b.save()
def delete_brush_cb(self, window):
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected!'))
return
if not dialogs.confirm(self, _("Really delete brush from disk?")):
return
self.bm.select_brush(None)
self.delete_brush_internal(b)
def delete_brush_internal(self, b, replacement=None):
for brushes in self.bm.groups.itervalues():
if b in brushes:
idx = brushes.index(b)
if replacement:
brushes[idx] = replacement
else:
del brushes[idx]
for f in self.bm.brushes_observers: f(brushes)
assert b not in brushes, 'Brush exists multiple times in the same group!'
if not b.delete_from_disk():
# stock brush can't be deleted
deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP)
deleted_brushes.insert(0, b)
for f in self.bm.brushes_observers: f(deleted_brushes)
class BrushIconEditorWidget(gtk.VBox):
def __init__(self, app):
gtk.VBox.__init__(self)
self.app = app
self.bm = app.brushmanager
self.set_border_width(8)
self.init_widgets()
self.bm.selected_brush_observers.append(self.brush_selected_cb)
self.set_brush_preview_edit_mode(False)
def init_widgets(self):
button_box = gtk.HBox()
doc = document.Document(self.app.brush)
self.tdw = tileddrawwidget.TiledDrawWidget(self.app, doc)
self.tdw.set_size_request(brushmanager.preview_w*2, brushmanager.preview_h*2)
self.tdw.scale = 2.0
tdw_box = gtk.HBox()
tdw_box.pack_start(self.tdw, expand=False, fill=False)
tdw_box.pack_start(gtk.Label(), expand=True)
self.pack_start(tdw_box, expand=False, fill=False, padding=3)
self.pack_start(button_box, expand=False, fill=False, padding=3)
self.brush_preview_edit_mode_button = b = gtk.CheckButton(_('Edit'))
b.connect('toggled', self.brush_preview_edit_mode_cb)
button_box.pack_start(b, expand=False, padding=3)
self.brush_preview_clear_button = b = gtk.Button(_('Clear'))
def clear_cb(window):
self.tdw.doc.clear_layer()
b.connect('clicked', clear_cb)
button_box.pack_start(b, expand=False, padding=3)
self.brush_preview_save_button = b = gtk.Button(_('Save'))
b.connect('clicked', self.update_preview_cb)
button_box.pack_start(b, expand=False, padding=3)
def brush_preview_edit_mode_cb(self, button):
self.set_brush_preview_edit_mode(button.get_active())
def set_brush_preview_edit_mode(self, edit_mode):
self.brush_preview_edit_mode = edit_mode
self.brush_preview_edit_mode_button.set_active(edit_mode)
self.brush_preview_save_button.set_sensitive(edit_mode)
self.brush_preview_clear_button.set_sensitive(edit_mode)
self.tdw.set_sensitive(edit_mode)
def set_preview_pixbuf(self, pixbuf):
if pixbuf is None:
self.tdw.doc.clear()
else:
self.tdw.doc.load_from_pixbuf(pixbuf)
def get_preview_pixbuf(self):
pixbuf = self.tdw.doc.render_as_pixbuf(0, 0, brushmanager.preview_w, brushmanager.preview_h)
return pixbuf
def update_preview_cb(self, window):
pixbuf = self.get_preview_pixbuf()
b = self.bm.selected_brush
if not b.name:
dialogs.error(self, _('No brush selected, please use "Add As New" instead.'))
return
b.preview = pixbuf
b.save()
for brushes in self.bm.groups.itervalues():
if b in brushes:
for f in self.bm.brushes_observers: f(brushes)
def brush_selected_cb(self, managed_brush, brushinfo):
# Update brush icon preview if it is not in edit mode
if not self.brush_preview_edit_mode:
self.set_preview_pixbuf(managed_brush.preview)
|
gpl-2.0
|
teixas/js.bootstrap_wysihtml5
|
bootstrap.py
|
40
|
10525
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os, shutil, sys, tempfile, urllib, urllib2, subprocess
from optparse import OptionParser
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
quote = str
# See zc.buildout.easy_install._has_broken_dash_S for motivation and comments.
stdout, stderr = subprocess.Popen(
[sys.executable, '-Sc',
'try:\n'
' import ConfigParser\n'
'except ImportError:\n'
' print 1\n'
'else:\n'
' print 0\n'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
has_broken_dash_S = bool(int(stdout.strip()))
# In order to be more robust in the face of system Pythons, we want to
# run without site-packages loaded. This is somewhat tricky, in
# particular because Python 2.6's distutils imports site, so starting
# with the -S flag is not sufficient. However, we'll start with that:
if not has_broken_dash_S and 'site' in sys.modules:
# We will restart with python -S.
args = sys.argv[:]
args[0:0] = [sys.executable, '-S']
args = map(quote, args)
os.execv(sys.executable, args)
# Now we are running with -S. We'll get the clean sys.path, import site
# because distutils will do it later, and then reset the path and clean
# out any namespace packages from site-packages that might have been
# loaded by .pth files.
clean_path = sys.path[:]
import site # imported because of its side effects
sys.path[:] = clean_path
for k, v in sys.modules.items():
if k in ('setuptools', 'pkg_resources') or (
hasattr(v, '__path__') and
len(v.__path__) == 1 and
not os.path.exists(os.path.join(v.__path__[0], '__init__.py'))):
# This is a namespace package. Remove it.
sys.modules.pop(k)
is_jython = sys.platform.startswith('java')
setuptools_source = 'http://peak.telecommunity.com/dist/ez_setup.py'
distribute_source = 'http://python-distribute.org/distribute_setup.py'
# parsing arguments
def normalize_to_url(option, opt_str, value, parser):
if value:
if '://' not in value: # It doesn't smell like a URL.
value = 'file://%s' % (
urllib.pathname2url(
os.path.abspath(os.path.expanduser(value))),)
if opt_str == '--download-base' and not value.endswith('/'):
# Download base needs a trailing slash to make the world happy.
value += '/'
else:
value = None
name = opt_str[2:].replace('-', '_')
setattr(parser.values, name, value)
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", dest="version",
help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
action="store_true", dest="use_distribute", default=False,
help="Use Distribute rather than Setuptools.")
parser.add_option("--setup-source", action="callback", dest="setup_source",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or file location for the setup file. "
"If you use Setuptools, this will default to " +
setuptools_source + "; if you use Distribute, this "
"will default to " + distribute_source + "."))
parser.add_option("--download-base", action="callback", dest="download_base",
callback=normalize_to_url, nargs=1, type="string",
help=("Specify a URL or directory for downloading "
"zc.buildout and either Setuptools or Distribute. "
"Defaults to PyPI."))
parser.add_option("--eggs",
help=("Specify a directory for storing eggs. Defaults to "
"a temporary directory that is deleted when the "
"bootstrap script completes."))
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
if options.eggs:
eggs_dir = os.path.abspath(os.path.expanduser(options.eggs))
else:
eggs_dir = tempfile.mkdtemp()
if options.setup_source is None:
if options.use_distribute:
options.setup_source = distribute_source
else:
options.setup_source = setuptools_source
if options.accept_buildout_test_releases:
args.insert(0, 'buildout:accept-buildout-test-releases=true')
try:
import pkg_resources
import setuptools # A flag. Sometimes pkg_resources is installed alone.
if not hasattr(pkg_resources, '_distribute'):
raise ImportError
except ImportError:
ez_code = urllib2.urlopen(
options.setup_source).read().replace('\r\n', '\n')
ez = {}
exec ez_code in ez
setup_args = dict(to_dir=eggs_dir, download_delay=0)
if options.download_base:
setup_args['download_base'] = options.download_base
if options.use_distribute:
setup_args['no_fake'] = True
if sys.version_info[:2] == (2, 4):
setup_args['version'] = '0.6.32'
ez['use_setuptools'](**setup_args)
if 'pkg_resources' in sys.modules:
reload(sys.modules['pkg_resources'])
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(eggs_dir)]
if not has_broken_dash_S:
cmd.insert(1, '-S')
find_links = options.download_base
if not find_links:
find_links = os.environ.get('bootstrap-testing-find-links')
if not find_links and options.accept_buildout_test_releases:
find_links = 'http://downloads.buildout.org/'
if find_links:
cmd.extend(['-f', quote(find_links)])
if options.use_distribute:
setup_requirement = 'distribute'
else:
setup_requirement = 'setuptools'
ws = pkg_resources.working_set
setup_requirement_path = ws.find(
pkg_resources.Requirement.parse(setup_requirement)).location
env = dict(
os.environ,
PYTHONPATH=setup_requirement_path)
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setup_requirement_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if distv >= pkg_resources.parse_version('2dev'):
continue
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement += '=='+version
else:
requirement += '<2dev'
cmd.append(requirement)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
if exitcode != 0:
sys.stdout.flush()
sys.stderr.flush()
print ("An error occurred when trying to install zc.buildout. "
"Look above this message for any errors that "
"were output by easy_install.")
sys.exit(exitcode)
ws.add_entry(eggs_dir)
ws.require(requirement)
import zc.buildout.buildout
# If there isn't already a command in the args, add bootstrap
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
if not options.eggs: # clean up temporary egg directory
shutil.rmtree(eggs_dir)
|
bsd-3-clause
|
sestrella/ansible
|
lib/ansible/modules/network/onyx/onyx_buffer_pool.py
|
37
|
4908
|
#!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: onyx_buffer_pool
version_added: "2.8"
author: "Anas Badaha (@anasb)"
short_description: Configures Buffer Pool
description:
- This module provides declarative management of Onyx Buffer Pool configuration
on Mellanox ONYX network devices.
notes:
- Tested on ONYX 3.6.8130
options:
name:
description:
- pool name.
required: true
pool_type:
description:
- pool type.
choices: ['lossless', 'lossy']
default: lossy
memory_percent:
description:
- memory percent.
switch_priority:
description:
- switch priority, range 0-7.
"""
EXAMPLES = """
- name: configure buffer pool
onyx_buffer_pool:
name: roce
pool_type: lossless
memory_percent: 50.00
switch_priority: 3
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device.
returned: always
type: list
sample:
- traffic pool roce type lossless
- traffic pool roce memory percent 50.00
- traffic pool roce map switch-priority 3
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.onyx.onyx import show_cmd
from ansible.module_utils.network.onyx.onyx import BaseOnyxModule
class OnyxBufferPoolModule(BaseOnyxModule):
def init_module(self):
""" initialize module
"""
element_spec = dict(
name=dict(type='str', required=True),
pool_type=dict(choices=['lossless', 'lossy'], default='lossy'),
memory_percent=dict(type='float'),
switch_priority=dict(type='int')
)
argument_spec = dict()
argument_spec.update(element_spec)
self._module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True)
def get_required_config(self):
module_params = self._module.params
self._required_config = dict(module_params)
self.validate_param_values(self._required_config)
def validate_switch_priority(self, value):
if value and not 0 <= int(value) <= 7:
self._module.fail_json(msg='switch_priority value must be between 0 and 7')
def _set_traffic_pool_config(self, traffic_pool_config):
if traffic_pool_config is None:
return
traffic_pool_config = traffic_pool_config.get(self._required_config.get('name'))
self._current_config['pool_type'] = traffic_pool_config[0].get("Type")
self._current_config['switch_priority'] = int(traffic_pool_config[0].get("Switch Priorities"))
self._current_config['memory_percent'] = float(traffic_pool_config[0].get("Memory [%]"))
def _show_traffic_pool(self):
cmd = "show traffic pool {0}".format(self._required_config.get("name"))
return show_cmd(self._module, cmd, json_fmt=True, fail_on_error=False)
def load_current_config(self):
self._current_config = dict()
traffic_pool_config = self._show_traffic_pool()
self._set_traffic_pool_config(traffic_pool_config)
def generate_commands(self):
name = self._required_config.get("name")
pool_type = self._required_config.get("pool_type")
if self._current_config is None:
self._add_add_traffic_pool_cmds(name, pool_type)
else:
current_pool_type = self._current_config.get("pool_type")
if pool_type != current_pool_type:
self._add_add_traffic_pool_cmds(name, pool_type)
memory_percent = self._required_config.get("memory_percent")
if memory_percent is not None:
curr_memory_percent = self._current_config.get("memory_percent")
if curr_memory_percent is None or memory_percent != curr_memory_percent:
self._commands.append('traffic pool {0} memory percent {1}'.format(name, memory_percent))
switch_priority = self._required_config.get("switch_priority")
if switch_priority is not None:
curr_switch_priority = self._current_config.get("switch_priority")
if curr_switch_priority is None or switch_priority != curr_switch_priority:
self._commands.append('traffic pool {0} map switch-priority {1}'.format(name, switch_priority))
def _add_add_traffic_pool_cmds(self, name, pool_type):
self._commands.append('traffic pool {0} type {1}'.format(name, pool_type))
def main():
""" main entry point for module execution
"""
OnyxBufferPoolModule.main()
if __name__ == '__main__':
main()
|
gpl-3.0
|
zhjunlang/kbengine
|
kbe/res/scripts/common/Lib/test/test_logging.py
|
60
|
145919
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import select
import socket
import struct
import sys
import tempfile
from test.script_helper import assert_python_ok
from test.support import (captured_stdout, run_with_locale, run_unittest,
patch, requires_zlib, TestHandler, Matcher)
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asynchat
import asyncore
import errno
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler,
ThreadingUnixStreamServer,
ThreadingUnixDatagramServer)
except ImportError:
threading = None
try:
import win32evtlog
except ImportError:
win32evtlog = None
try:
import win32evtlogutil
except ImportError:
win32evtlogutil = None
win32evtlog = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
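# A short sketch of how these names are hooked into the logging module; this
# mirrors what CustomLevelsAndFiltersTest.setUp does further below:
#   for level, name in my_logging_levels.items():
#       logging.addLevelName(level, name)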
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
old_stderr = sys.stderr
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertIn('\nRuntimeError: deliberate mistake\n',
sio.getvalue())
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
h.handle(r)
self.assertEqual('', sio.getvalue())
finally:
logging.raiseExceptions = old_raise
sys.stderr = old_stderr
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
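# A minimal sketch of how the helper servers above are typically driven by the
# handler tests that follow (the handler callable and timeouts are illustrative):
#   server = TestTCPServer(('localhost', 0), handle_request, poll_interval=0.01)
#   server.start()
#   server.ready.wait()   # then point e.g. a SocketHandler at ('localhost', server.port)
#   ...
#   server.stop(2.0)      # shut the server thread down cleanly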
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer(('localhost', 0), self.process_message, 0.001,
sockmap)
server.start()
addr = ('localhost', server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
PEMFILE = """-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDGT4xS5r91rbLJQK2nUDenBhBG6qFk+bVOjuAGC/LSHlAoBnvG
zQG3agOG+e7c5z2XT8m2ktORLqG3E4mYmbxgyhDrzP6ei2Anc+pszmnxPoK3Puh5
aXV+XKt0bU0C1m2+ACmGGJ0t3P408art82nOxBw8ZHgIg9Dtp6xIUCyOqwIDAQAB
AoGBAJFTnFboaKh5eUrIzjmNrKsG44jEyy+vWvHN/FgSC4l103HxhmWiuL5Lv3f7
0tMp1tX7D6xvHwIG9VWvyKb/Cq9rJsDibmDVIOslnOWeQhG+XwJyitR0pq/KlJIB
5LjORcBw795oKWOAi6RcOb1ON59tysEFYhAGQO9k6VL621gRAkEA/Gb+YXULLpbs
piXN3q4zcHzeaVANo69tUZ6TjaQqMeTxE4tOYM0G0ZoSeHEdaP59AOZGKXXNGSQy
2z/MddcYGQJBAMkjLSYIpOLJY11ja8OwwswFG2hEzHe0cS9bzo++R/jc1bHA5R0Y
i6vA5iPi+wopPFvpytdBol7UuEBe5xZrxWMCQQCWxELRHiP2yWpEeLJ3gGDzoXMN
PydWjhRju7Bx3AzkTtf+D6lawz1+eGTuEss5i0JKBkMEwvwnN2s1ce+EuF4JAkBb
E96h1lAzkVW5OAfYOPY8RCPA90ZO/hoyg7PpSxR0ECuDrgERR8gXIeYUYfejBkEa
rab4CfRoVJKKM28Yq/xZAkBvuq670JRCwOgfUTdww7WpdOQBYPkzQccsKNCslQW8
/DyW6y06oQusSENUvynT6dr3LJxt/NgZPhZX2+k1eYDV
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICGzCCAYSgAwIBAgIJAIq84a2Q/OvlMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMTCWxvY2FsaG9zdDAeFw0xMTA1MjExMDIzMzNaFw03NTAzMjEwMzU1MTdaMBQx
EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
xk+MUua/da2yyUCtp1A3pwYQRuqhZPm1To7gBgvy0h5QKAZ7xs0Bt2oDhvnu3Oc9
l0/JtpLTkS6htxOJmJm8YMoQ68z+notgJ3PqbM5p8T6Ctz7oeWl1flyrdG1NAtZt
vgAphhidLdz+NPGq7fNpzsQcPGR4CIPQ7aesSFAsjqsCAwEAAaN1MHMwHQYDVR0O
BBYEFLWaUPO6N7efGiuoS9i3DVYcUwn0MEQGA1UdIwQ9MDuAFLWaUPO6N7efGiuo
S9i3DVYcUwn0oRikFjAUMRIwEAYDVQQDEwlsb2NhbGhvc3SCCQCKvOGtkPzr5TAM
BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAMK5whPjLNQK1Ivvk88oqJqq
4f889OwikGP0eUhOBhbFlsZs+jq5YZC2UzHz+evzKBlgAP1u4lP/cB85CnjvWqM+
1c/lywFHQ6HOdDeQ1L72tSYMrNOG4XNmLn0h7rx6GoTU7dcFRfseahBCq8mv0IDt
IRbTpvlHWPjsSvHz0ZOH
-----END CERTIFICATE-----"""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
fd, fn = tempfile.mkstemp()
os.close(fd)
with open(fn, 'w') as f:
f.write(self.PEMFILE)
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(fn)
os.unlink(fn)
except ImportError:
sslctx = None
else:
sslctx = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client)
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
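    # config9a and config9b are incremental updates applied on top of config9:
    # 9a drops the 'compiler.parser' logger to INFO but leaves the handler at
    # WARNING; 9b also drops the handler to INFO so records finally get emitted.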
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
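        # 'true_formatters' and 'handler_configs' are not special keys; they are
        # only referenced below through cfg:// URLs.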
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
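    # out_of_order: the MemoryHandler 'bufferGlobal' names another handler
    # ('fileGlobal') as its target, so dictConfig must resolve a handler-to-handler
    # reference. Note the %-style format string paired with style '$'; only the
    # resulting style class is checked in test_out_of_order, so the mismatch is
    # never exercised.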
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    #Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            #Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
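            # logging.config.listen() expects each configuration to be preceded
            # by its length as a 4-byte big-endian integer.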
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
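        # convert() resolves cfg:// references via dict keys, attribute-style
        # access and [index]/[key] lookups.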
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
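        # The 'que' logger level is WARNING, so DEBUG and INFO records never
        # reach the queue.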
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_stderr = sys.stderr
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
sys.stderr = sio = io.StringIO()
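            # logging.lastResort is a stderr handler with level WARNING, so a
            # DEBUG record produces no output while a WARNING does.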
root.debug('This should not appear')
self.assertEqual(sio.getvalue(), '')
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'This is your final chance!\n')
#No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'No handlers could be found for logger "root"\n')
# 'No handlers' message only printed once
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
root.manager.emittedNoHandlerWarning = False
#If raiseExceptions is False, no message is printed
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
finally:
sys.stderr = old_stderr
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
        # create live weakrefs to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
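        # shutdown() walks the handler list in reverse, so handler 2 is
        # flushed and closed first.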
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = True
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
old_raise = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', old_raise)
logging.raiseExceptions = False
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
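        # Keys in 'extra' may not clash with 'message', 'asctime' or any
        # attribute already present on the LogRecord.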
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@requires_zlib
def test_rotator(self):
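        # namer appends '.gz' to rotated filenames and rotator compresses each
        # rotated file with zlib before removing the original.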
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
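            # currentTime is the epoch (a Thursday, 00:00 UTC) and rollover is
            # at 12:00, so the first rollover is 12 hours away; once 13:00 has
            # passed, the next rollover is 36 hours from the epoch.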
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
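    # 'when' and 'exp' are bound as default arguments so that each generated
    # test method keeps the values from its own loop iteration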
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
h = logging.handlers.NTEventLogHandler('test_logging')
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, HandlerTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, DatagramHandlerTest,
MemoryTest, EncodingTest, WarningsTest, ConfigDictTest,
ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest,
BasicConfigTest, LoggerAdapterTest, LoggerTest,
SMTPHandlerTest, FileHandlerTest, RotatingFileHandlerTest,
LastResortTest, LogRecordTest, ExceptionTest,
SysLogHandlerTest, HTTPHandlerTest, NTEventLogHandlerTest,
TimedRotatingFileHandlerTest, UnixSocketHandlerTest,
UnixDatagramHandlerTest, UnixSysLogHandlerTest
)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
|
michalkurka/h2o-3
|
h2o-py/tests_rest_smoke/h2o_print.py
|
2
|
1663
|
import getpass
# some fun to match michal's use of green in his messaging in ec2_cmd.py
# generalize like http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
class bcolors:
PURPLE = ''
BLUE = ''
GREEN = ''
YELLOW = ''
RED = ''
ENDC = ''
def enable(self):
self.PURPLE = '\033[95m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.YELLOW = '\033[93m'
self.RED = '\033[91m'
self.ENDC = '\033[0m'
def disable(self):
self.PURPLE = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
b = bcolors()
b.enable()
def disable_colors():
b.disable()
def enable_colors():
b.enable()
# make these compatible with multiple args like print?
def green_print(*args):
    # each arg prints on its own line; colors are stripped for the 'jenkins' user
    if getpass.getuser() == 'jenkins':
b.disable()
for msg in args:
print(b.GREEN + str(msg) + b.ENDC)
print
def blue_print(*args):
if getpass.getuser() == 'jenkins':
b.disable()
for msg in args:
print (b.BLUE + str(msg) + b.ENDC)
print
def yellow_print(*args):
if getpass.getuser() == 'jenkins':
b.disable()
for msg in args:
print(b.YELLOW + str(msg) + b.ENDC)
print
def red_print(*args):
if getpass.getuser() == 'jenkins':
b.disable()
for msg in args:
print(b.RED + str(msg) + b.ENDC)
print
def purple_print(*args):
    if getpass.getuser() == 'jenkins':
b.disable()
for msg in args:
print(b.PURPLE + str(msg) + b.ENDC)
print
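# A minimal usage sketch (assumption: this module is run or imported as-is;
# the helpers print one line per argument and strip colors for 'jenkins'):
if __name__ == '__main__':
    green_print("cloud is up", "3 nodes healthy")
    yellow_print("warning: slow response")
    red_print("test failed")
    disable_colors()
    blue_print("plain text after disable_colors()")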
|
apache-2.0
|
pinterb/st2contrib
|
packs/elasticsearch/actions/lib/static_metagen.py
|
12
|
4319
|
import yaml
class StaticMetagen(object):
action_meta = {
"name": "",
"parameters": {
"action": {
"type": "string",
"immutable": True,
"default": ""
},
"host": {
"description": "Elasticsearch host.",
"type": "string",
"required": True
},
"url_prefix": {
"description": "Elasticsearch http url prefix.",
"type": "string"
},
"port": {
"description": "Elasticsearch port.",
"type": "string",
},
"use_ssl": {
"description": "Connect to Elasticsearch through SSL.",
"type": "boolean",
"default": False
},
"http_auth": {
"description": "Use Basic Authentication ex: user:pass",
"type": "string"
},
"master_only": {
"description": "Only operate on elected master node.",
"type": "boolean",
"default": False
},
"timeout": {
"description": "Don't wait for action completion more then the "
"specified timeout.",
"default": 600,
"type": "integer"
},
"operation_timeout": {
"description": "Elasticsearch operation timeout in seconds. "
"(It's equal to action timeout).",
"default": "{{timeout}}",
"immutable": True,
"type": "string"
},
"log_level": {
"description": "Log level [critical|error|warning|info|debug].",
"type": "string",
"default": "warn"
},
"dry_run": {
"description": "Do not perform any changes.",
"type": "boolean",
"default": False
}
},
"runner_type": "run-python",
"description": "Run a Meta Action through a generic Runner.",
"enabled": True,
"entry_point": "curator_runner.py"}
parameter_meta = {
"type": "string"
}
def __init__(self, action_meta=None):
self.action_meta = StaticMetagen.action_meta
if action_meta is not None:
self.action_meta.update(action_meta)
def generate_action(self, module_type, action):
manifest = self.action_meta
manifest['name'] = "{0}_{1}".format(module_type, action)
manifest['parameters']['action']['default'] = action
fh = open('{0}_{1}.yaml'.format(module_type, action), 'w')
fh.write('---\n')
fh.write(yaml.dump(manifest, default_flow_style=False))
fh.close()
def generate_from_file(self, meta_file):
if meta_file is None:
return None
with open(meta_file) as fh:
actions = yaml.load(fh.read())
for manifest in self._merge_actions(actions):
fh = open('{0}.yaml'.format(manifest['name']), 'w')
fh.write('---\n')
fh.write(yaml.dump(manifest, default_flow_style=False))
fh.close()
def _merge_actions(self, actions):
for action in actions:
name, meta = action.items()[0]
manifest = self.action_meta.copy()
manifest['name'] = name
manifest['parameters']['action']['default'] = name
for k, v in meta['parameters'].items():
nv = StaticMetagen.parameter_meta.copy()
nv.update(v)
meta['parameters'][k] = nv
parameters = manifest['parameters'].copy()
parameters.update(meta['parameters'])
meta['parameters'] = parameters
manifest.update(meta)
if 'alias' in manifest:
alias_name = manifest.pop('alias')
alias_manifest = manifest.copy()
alias_manifest['name'] = alias_name
yield alias_manifest
yield manifest
metagen = StaticMetagen()
metagen.generate_from_file('lib/curator_actions.yaml')
# print yaml.dump(metagen.meta_actions)
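# A hypothetical sketch of the expected lib/curator_actions.yaml layout,
# inferred from _merge_actions above (action names and parameters here are
# illustrative, not taken from the real pack): a list of single-key mappings,
# each carrying a 'parameters' section and an optional 'alias'.
#
# - close_indices:
#     alias: close
#     parameters:
#       older_than:
#         description: "Act on indices older than this many days."
#         type: integer
# - delete_indices:
#     parameters:
#       older_than:
#         type: integer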
|
apache-2.0
|
missionpinball/mpf
|
mpf/tests/test_CarouselMode.py
|
1
|
10205
|
from mpf.tests.MpfTestCase import MpfTestCase, MagicMock
class TestCarouselMode(MpfTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/carousel/'
def _start_game(self):
self.machine.playfield.add_ball = MagicMock()
self.machine.ball_controller.num_balls_known = 3
self.hit_and_release_switch("s_start")
self.advance_time_and_run()
self.assertIsNotNone(self.machine.game)
def _stop_game(self):
# stop game
self.assertIsNotNone(self.machine.game)
self.machine.game.end_game()
self.advance_time_and_run()
self.assertIsNone(self.machine.game)
def testBlockingCarousel(self):
self.mock_event("blocking_carousel_item1_highlighted")
self.mock_event("blocking_carousel_item2_highlighted")
self.mock_event("blocking_carousel_item3_highlighted")
self.mock_event("flipper_cancel")
self._start_game()
self.post_event("start_mode4")
self.assertIn(self.machine.modes["blocking_carousel"], self.machine.mode_controller.active_modes)
self.assertEqual(1, self._events["blocking_carousel_item1_highlighted"])
self.assertEqual(0, self._events["blocking_carousel_item2_highlighted"])
self.post_event("s_flipper_right_active")
self.post_event("s_flipper_right_inactive")
self.assertEqual(1, self._events["blocking_carousel_item2_highlighted"])
self.assertEqual(0, self._events["blocking_carousel_item3_highlighted"])
self.assertEqual(0, self._events["flipper_cancel"])
self.post_event("s_flipper_right_active")
self.post_event("s_flipper_left_active")
self.post_event("flipper_cancel")
self.post_event("s_flipper_right_inactive")
self.post_event("s_flipper_left_inactive")
self.assertEqual(1, self._events["flipper_cancel"])
self.assertEqual(1, self._events["blocking_carousel_item1_highlighted"])
self.assertEqual(1, self._events["blocking_carousel_item2_highlighted"])
self.assertEqual(0, self._events["blocking_carousel_item3_highlighted"])
self.post_event("both_flippers_inactive")
self.post_event("s_flipper_right_inactive")
self.assertEqual(1, self._events["blocking_carousel_item3_highlighted"])
# Restart the mode to ensure that the block is cleared
self.post_event("flipper_cancel")
self.post_event("stop_mode4")
self.advance_time_and_run()
self.post_event("start_mode4")
self.post_event("s_flipper_right_inactive")
self.assertEqual(2, self._events["blocking_carousel_item2_highlighted"],
"item2_highlighted should be called when a blocked mode restarts")
def testConditionalCarousel(self):
self.mock_event("conditional_carousel_item1_highlighted")
self.mock_event("conditional_carousel_item2_highlighted")
self.mock_event("conditional_carousel_item3_highlighted")
self.mock_event("conditional_carousel_item4_highlighted")
self._start_game()
# Start the mode without any conditions true
self.post_event("start_mode3")
self.assertIn(self.machine.modes["conditional_carousel"], self.machine.mode_controller.active_modes)
self.assertEqual(1, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(2, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(3, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("stop_mode3")
# Reset the count for item 1
self.mock_event("conditional_carousel_item1_highlighted")
# Start the mode with a player variable condition
self.machine.game.player["show_item4"] = True
self.post_event("start_mode3")
self.assertEqual(1, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(1, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(1, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(2, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(1, self._events["conditional_carousel_item4_highlighted"])
self.post_event("stop_mode3")
# Reset the count for items 1 and 4
self.mock_event("conditional_carousel_item1_highlighted")
self.mock_event("conditional_carousel_item4_highlighted")
# Start the mode with a machine variable condition
self.machine.variables.set_machine_var("player2_score", 500000)
self.machine.game.player["show_item4"] = False
self.post_event("start_mode3")
self.assertEqual(1, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(1, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(1, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("next")
self.assertEqual(2, self._events["conditional_carousel_item1_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item2_highlighted"])
self.assertEqual(1, self._events["conditional_carousel_item3_highlighted"])
self.assertEqual(0, self._events["conditional_carousel_item4_highlighted"])
self.post_event("stop_mode3")
# The mode shouldn't start if all conditions are false (i.e. no items)
self.mock_event("conditional_carousel_items_empty")
self.machine.game.player["hide_item1"] = "truthy"
self.machine.variables.set_machine_var("player2_score", 0)
self.post_event("start_mode3")
self.assertEqual(1, self._events["conditional_carousel_items_empty"])
self.assertNotIn(self.machine.modes["conditional_carousel"], self.machine.mode_controller.active_modes)
def testExtraBall(self):
self.mock_event("carousel_item1_highlighted")
self.mock_event("carousel_item1_selected")
self.mock_event("carousel_item2_highlighted")
self.mock_event("carousel_item2_selected")
self.mock_event("carousel_item3_highlighted")
self.mock_event("carousel_item3_selected")
# start game
self._start_game()
# start mode
self.post_event("start_mode1")
self.assertIn(self.machine.modes["carousel"], self.machine.mode_controller.active_modes)
self.assertEqual(1, self._events["carousel_item1_highlighted"])
self.assertEqual(0, self._events["carousel_item2_highlighted"])
self.assertEqual(0, self._events["carousel_item3_highlighted"])
self.post_event("next")
self.assertEqual(1, self._events["carousel_item1_highlighted"])
self.assertEqual(1, self._events["carousel_item2_highlighted"])
self.assertEqual(0, self._events["carousel_item3_highlighted"])
self.post_event("next")
self.assertEqual(1, self._events["carousel_item1_highlighted"])
self.assertEqual(1, self._events["carousel_item2_highlighted"])
self.assertEqual(1, self._events["carousel_item3_highlighted"])
self.post_event("next")
self.assertEqual(2, self._events["carousel_item1_highlighted"])
self.assertEqual(1, self._events["carousel_item2_highlighted"])
self.assertEqual(1, self._events["carousel_item3_highlighted"])
self.post_event("previous2")
self.assertEqual(2, self._events["carousel_item1_highlighted"])
self.assertEqual(1, self._events["carousel_item2_highlighted"])
self.assertEqual(2, self._events["carousel_item3_highlighted"])
self.post_event("previous")
self.assertEqual(2, self._events["carousel_item1_highlighted"])
self.assertEqual(2, self._events["carousel_item2_highlighted"])
self.assertEqual(2, self._events["carousel_item3_highlighted"])
self.post_event("select")
self.assertEqual(0, self._events["carousel_item1_selected"])
self.assertEqual(1, self._events["carousel_item2_selected"])
self.assertEqual(0, self._events["carousel_item3_selected"])
self.assertNotIn(self.machine.modes["carousel"], self.machine.mode_controller.active_modes)
|
mit
|
jyt109/SimpleCV
|
SimpleCV/examples/kinect/kinect-coloring.py
|
13
|
2150
|
#!/usr/bin/python
import time, webbrowser
from operator import add
from SimpleCV import Color, ColorCurve, Kinect, Image, pg, np
from SimpleCV.Display import Display
d = Display(flags = pg.FULLSCREEN)
#create video streams
cam = Kinect()
#initialize the camera
compositeframe = Image((640, 480))
#start with an empty composite frame to paint into
offtime = 5.0
laststroke = time.time()
while not d.isDone():
img = cam.getImage()
imgscene = img.copy()
depth = cam.getDepth()
mindepth = np.min(depth.getNumpy())
if mindepth < 180:
depthbin = depth.binarize(np.min(depth.getNumpy()) + np.std(depth.getNumpy()) / 4).erode(3)
#take the front 1/4 stdev of the depth map
img = img.crop(0,25, 605, 455).scale(640,480)
#img.dl().blit(img.crop(100, 25, 515, 455), (125,0))
#this is a bit of a hack to compensate for the offset between cam and depth sensor
#img = img.applyLayers()
img = img - depthbin.invert()
#img.save(d)
meanred, meangrn, meanblue = img.meanColor()
if meanred > meanblue and meanred > meangrn:
depthbin, junk, junk = depthbin.splitChannels(grayscale = False)
if meanblue > meanred and meanblue > meangrn:
junk, junk, depthbin = depthbin.splitChannels(grayscale = False)
if meangrn > meanred and meangrn > meanblue:
junk, depthbin, junk = depthbin.splitChannels(grayscale = False)
laststroke = time.time()
compositeframe = compositeframe + depthbin
#we're painting -- keep adding to the composite frame
else:
if (time.time() - laststroke > offtime):
#if we're not painting for a certain amount of time, reset
compositeframe = Image(cam.getImage().getEmpty())
frame = ((imgscene - compositeframe.binarize(10).invert()) + compositeframe).flipHorizontal()
    #subtract our composite frame from the camera image, then add the composite back in so the painted strokes show; flip horizontally for a mirror view
    frame.save(d) #render to the display
if d.mouseLeft:
d.done = True
pg.quit()
    time.sleep(0.01) #brief sleep to yield the CPU
|
bsd-3-clause
|
SebastianLloret/Clever-Bot
|
libfuturize/fixes/fix_absolute_import.py
|
62
|
3141
|
"""
Fixer for import statements, with a __future__ import line.
Based on lib2to3/fixes/fix_import.py, but extended slightly so it also
supports Cython modules.
If spam is being imported from the local directory, this import:
from spam import eggs
becomes:
from __future__ import absolute_import
from .spam import eggs
and this import:
import spam
becomes:
from __future__ import absolute_import
from . import spam
"""
from os.path import dirname, join, exists, sep
from lib2to3.fixes.fix_import import FixImport
from lib2to3.fixer_util import FromImport, syms
from lib2to3.fixes.fix_import import traverse_imports
from libfuturize.fixer_util import future_import
class FixAbsoluteImport(FixImport):
run_order = 9
def transform(self, node, results):
"""
Copied from FixImport.transform(), but with this line added in
any modules that had implicit relative imports changed:
from __future__ import absolute_import"
"""
if self.skip:
return
imp = results['imp']
if node.type == syms.import_from:
# Some imps are top-level (eg: 'import ham')
# some are first level (eg: 'import ham.eggs')
# some are third level (eg: 'import ham.eggs as spam')
# Hence, the loop
while not hasattr(imp, 'value'):
imp = imp.children[0]
if self.probably_a_local_import(imp.value):
imp.value = u"." + imp.value
imp.changed()
future_import(u"absolute_import", node)
else:
have_local = False
have_absolute = False
for mod_name in traverse_imports(imp):
if self.probably_a_local_import(mod_name):
have_local = True
else:
have_absolute = True
if have_absolute:
if have_local:
# We won't handle both sibling and absolute imports in the
# same statement at the moment.
self.warning(node, "absolute and local imports together")
return
new = FromImport(u".", [imp])
new.prefix = node.prefix
future_import(u"absolute_import", node)
return new
def probably_a_local_import(self, imp_name):
"""
Like the corresponding method in the base class, but this also
supports Cython modules.
"""
if imp_name.startswith(u"."):
# Relative imports are certainly not local imports.
return False
imp_name = imp_name.split(u".", 1)[0]
base_path = dirname(self.filename)
base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file, it's not in a package,
        # so this can't be a relative import.
if not exists(join(dirname(base_path), "__init__.py")):
return False
for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd", ".pyx"]:
if exists(base_path + ext):
return True
return False
|
gpl-3.0
|
mukashi/solarized
|
utils/tests/python.py
|
79
|
2613
|
# test python (sample from offlineimap)
# imports added so this sample is self-contained (Python 2: 'thread' module)
import sys, thread, traceback
from threading import Thread
class ExitNotifyThread(Thread):
    """This class is designed to alert a "monitor" to the fact that a thread
    has exited and to let the monitor find out why."""
def run(self):
global exitthreads, profiledir
self.threadid = thread.get_ident()
try:
if not profiledir: # normal case
Thread.run(self)
else:
try:
import cProfile as profile
except ImportError:
import profile
prof = profile.Profile()
try:
prof = prof.runctx("Thread.run(self)", globals(), locals())
except SystemExit:
pass
prof.dump_stats( \
profiledir + "/" + str(self.threadid) + "_" + \
self.getName() + ".prof")
except:
self.setExitCause('EXCEPTION')
if sys:
self.setExitException(sys.exc_info()[1])
tb = traceback.format_exc()
self.setExitStackTrace(tb)
else:
self.setExitCause('NORMAL')
if not hasattr(self, 'exitmessage'):
self.setExitMessage(None)
if exitthreads:
exitthreads.put(self, True)
def setExitCause(self, cause):
self.exitcause = cause
def getExitCause(self):
"""Returns the cause of the exit, one of:
'EXCEPTION' -- the thread aborted because of an exception
'NORMAL' -- normal termination."""
return self.exitcause
def setExitException(self, exc):
self.exitexception = exc
def getExitException(self):
"""If getExitCause() is 'EXCEPTION', holds the value from
sys.exc_info()[1] for this exception."""
return self.exitexception
def setExitStackTrace(self, st):
self.exitstacktrace = st
def getExitStackTrace(self):
"""If getExitCause() is 'EXCEPTION', returns a string representing
the stack trace for this exception."""
return self.exitstacktrace
def setExitMessage(self, msg):
"""Sets the exit message to be fetched by a subsequent call to
getExitMessage. This message may be any object or type except
None."""
self.exitmessage = msg
def getExitMessage(self):
"""For any exit cause, returns the message previously set by
a call to setExitMessage(), or None if there was no such message
set."""
return self.exitmessage
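# A minimal usage sketch (illustrative, mirroring how offlineimap consumes an
# 'exitthreads' queue; 'some_work' is a placeholder callable, not defined here):
#
#   from Queue import Queue        # Python 2, like the rest of this sample
#   exitthreads = Queue()
#   profiledir = None
#   t = ExitNotifyThread(target=some_work)
#   t.start()
#   finished = exitthreads.get()   # blocks until some thread exits
#   if finished.getExitCause() == 'EXCEPTION':
#       print finished.getExitStackTrace()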
|
mit
|
bop/hybrid
|
lib/python2.6/site-packages/django/contrib/localflavor/hr/forms.py
|
100
|
9127
|
# -*- coding: utf-8 -*-
"""
HR-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import datetime
import re
from django.contrib.localflavor.hr.hr_choices import (
HR_LICENSE_PLATE_PREFIX_CHOICES, HR_COUNTY_CHOICES,
HR_PHONE_NUMBER_PREFIX_CHOICES)
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select, RegexField
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
jmbg_re = re.compile(r'^(?P<dd>\d{2})(?P<mm>\d{2})(?P<yyy>\d{3})' + \
r'(?P<rr>\d{2})(?P<bbb>\d{3})(?P<k>\d{1})$')
oib_re = re.compile(r'^\d{11}$')
plate_re = re.compile(r'^(?P<prefix>[A-ZČŠŽ]{2})' + \
r'(?P<number>\d{3,4})(?P<suffix>[ABCDEFGHIJKLMNOPRSTUVZ]{1,2})$')
postal_code_re = re.compile(r'^\d{5}$')
phone_re = re.compile(r'^(\+385|00385|0)(?P<prefix>\d{2})(?P<number>\d{6,7})$')
jmbag_re = re.compile(r'^601983(?P<copy>\d{1})1(?P<jmbag>\d{10})(?P<k>\d{1})$')
class HRCountySelect(Select):
"""
A Select widget that uses a list of counties of Croatia as its choices.
"""
def __init__(self, attrs=None):
super(HRCountySelect, self).__init__(attrs, choices=HR_COUNTY_CHOICES)
class HRLicensePlatePrefixSelect(Select):
"""
A Select widget that uses a list of vehicle license plate prefixes of
Croatia as its choices.
"""
def __init__(self, attrs=None):
super(HRLicensePlatePrefixSelect, self).__init__(attrs,
choices=HR_LICENSE_PLATE_PREFIX_CHOICES)
class HRPhoneNumberPrefixSelect(Select):
"""
A Select widget that uses a list of phone number prefixes of Croatia as its
choices.
"""
def __init__(self, attrs=None):
super(HRPhoneNumberPrefixSelect, self).__init__(attrs,
choices=HR_PHONE_NUMBER_PREFIX_CHOICES)
class HRJMBGField(Field):
"""
Unique Master Citizen Number (JMBG) field.
The number is still in use in Croatia, but it is being replaced by OIB.
Source: http://en.wikipedia.org/wiki/Unique_Master_Citizen_Number
    For whoever might be reimplementing:
The "area" regular expression group is used to calculate the region where a
person was registered. Additional validation can be implemented in
accordance with it, however this could result in exclusion of legit
immigrated citizens. Therefore, this field works for any ex-Yugoslavia
country.
"""
default_error_messages = {
'invalid': _('Enter a valid 13 digit JMBG'),
'date': _('Error in date segment'),
}
def clean(self, value):
super(HRJMBGField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.strip()
matches = jmbg_re.search(value)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure the date part is correct.
dd = int(matches.group('dd'))
mm = int(matches.group('mm'))
yyy = int(matches.group('yyy'))
try:
datetime.date(yyy, mm, dd)
except ValueError:
raise ValidationError(self.error_messages['date'])
# Validate checksum.
k = matches.group('k')
checksum = 0
for i, j in zip(range(7, 1, -1), range(6)):
checksum += i * (int(value[j]) + int(value[13 - i]))
m = 11 - checksum % 11
if m == 10:
raise ValidationError(self.error_messages['invalid'])
if m == 11 and k != '0':
raise ValidationError(self.error_messages['invalid'])
if not str(m) == k:
raise ValidationError(self.error_messages['invalid'])
return '%s' % (value, )
class HROIBField(RegexField):
"""
Personal Identification Number of Croatia (OIB) field.
http://www.oib.hr/
"""
default_error_messages = {
'invalid': _('Enter a valid 11 digit OIB'),
}
def __init__(self, min_length=11, max_length=11, *args, **kwargs):
super(HROIBField, self).__init__(r'^\d{11}$',
min_length, max_length, *args, **kwargs)
def clean(self, value):
super(HROIBField, self).clean(value)
if value in EMPTY_VALUES:
return ''
return '%s' % (value, )
class HRLicensePlateField(Field):
"""
Vehicle license plate of Croatia field. Normalizes to the specific format
below. Suffix is constructed from the shared letters of the Croatian and
English alphabets.
Format examples:
SB 123-A
(but also supports more characters)
ZG 1234-AA
Used for standardized license plates only.
"""
default_error_messages = {
'invalid': _('Enter a valid vehicle license plate number'),
'area': _('Enter a valid location code'),
'number': _('Number part cannot be zero'),
}
def clean(self, value):
super(HRLicensePlateField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub(r'[\s\-]+', '', smart_text(value.strip())).upper()
matches = plate_re.search(value)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure the prefix is in the list of known codes.
prefix = matches.group('prefix')
if prefix not in [choice[0] for choice in HR_LICENSE_PLATE_PREFIX_CHOICES]:
raise ValidationError(self.error_messages['area'])
# Make sure the number portion is not zero.
number = matches.group('number')
if int(number) == 0:
raise ValidationError(self.error_messages['number'])
return '%s %s-%s' % (prefix,number,matches.group('suffix'), )
class HRPostalCodeField(Field):
"""
Postal code of Croatia field.
It consists of exactly five digits ranging from 10000 to possibly less than
60000.
http://www.posta.hr/main.aspx?id=66
"""
default_error_messages = {
'invalid': _('Enter a valid 5 digit postal code'),
}
def clean(self, value):
super(HRPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = value.strip()
if not postal_code_re.search(value):
raise ValidationError(self.error_messages['invalid'])
# Make sure the number is in valid range.
        if not 9999 < int(value) < 60000:
raise ValidationError(self.error_messages['invalid'])
return '%s' % (value, )
class HRPhoneNumberField(Field):
"""
Phone number of Croatia field.
Format: Complete country code or leading zero, area code prefix, 6 or 7
digit number.
Validates fixed, mobile and FGSM numbers. Normalizes to a full number with
country code (+385 prefix).
"""
default_error_messages = {
'invalid': _('Enter a valid phone number'),
'area': _('Enter a valid area or mobile network code'),
        'number': _('The phone number is too long'),
}
def clean(self, value):
super(HRPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub(r'[\-\s\(\)]', '', smart_text(value))
matches = phone_re.search(value)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure the prefix is in the list of known codes.
prefix = matches.group('prefix')
number = matches.group('number')
if prefix[0] == '1':
number = prefix[1] + number
prefix = prefix[0]
if prefix not in [choice[0] for choice in HR_PHONE_NUMBER_PREFIX_CHOICES]:
raise ValidationError(self.error_messages['area'])
# Make sure the number is of adequate length.
if prefix=='1' and len(number)!=7:
raise ValidationError(self.error_messages['number'])
return '%s%s%s' % ('+385',prefix,number)
class HRJMBAGField(Field):
"""
Unique Master Academic Citizen Number of Croatia (JMBAG) field.
This number is used by college students and professors in Croatia.
http://www.cap.srce.hr/IzgledX.aspx
"""
default_error_messages = {
'invalid': _('Enter a valid 19 digit JMBAG starting with 601983'),
'copy': _('Card issue number cannot be zero'),
}
def clean(self, value):
super(HRJMBAGField, self).clean(value)
if value in EMPTY_VALUES:
return ''
value = re.sub(r'[\-\s]', '', value.strip())
matches = jmbag_re.search(value)
if matches is None:
raise ValidationError(self.error_messages['invalid'])
# Make sure the issue number is not zero.
if matches.group('copy')=='0':
raise ValidationError(self.error_messages['copy'])
# Validate checksum using Luhn algorithm.
num = [int(x) for x in value]
if not sum(num[::-2] + [sum(divmod(d * 2, 10)) for d in num[-2::-2]]) % 10 == 0:
raise ValidationError(self.error_messages['invalid'])
return '%s' % (value, )
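# A minimal usage sketch (illustrative values; acceptance ultimately depends on
# the prefix choices and checksum rules implemented above):
#
#   HRPhoneNumberField().clean('098 123 456')   # -> '+38598123456'
#   HRPostalCodeField().clean('10000')          # -> '10000'
#   HRPostalCodeField().clean('99999')          # raises ValidationError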
|
gpl-2.0
|
dietrichc/streamline-ppc-reports
|
examples/dfp/v201405/label_service/get_labels_by_statement.py
|
1
|
1743
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all labels ordered by name.
To create a label, run create_label.py. This feature is only available to DFP
premium solution networks.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
label_service = client.GetService('LabelService', version='v201405')
# Create statement to get all labels
statement = dfp.FilterStatement('ORDER BY name')
# Get labels by statement.
while True:
response = label_service.getLabelsByStatement(statement.ToStatement())
if 'results' in response:
# Display results.
for label in response['results']:
print ('Label with id \'%s\' and name \'%s\' was found.'
% (label['id'], label['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
apache-2.0
|
talhajaved/nyuadmarket
|
flask/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py
|
257
|
1073
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "15.0"
__author__ = "Donald Stufft"
__email__ = "[email protected]"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
|
mit
|
abinit/abinit
|
scripts/deprecated/class_ScrolledWidgetList.py
|
4
|
2295
|
from Tkinter import *
class ScrolledWidgetList(Frame):
def __init__(self,master=None,width=100,height=100,bg='white',pady=10,topspace=0,bottomspace=0):
self.width = width
self.height = height
self.pady = pady
self.topspace = topspace
self.bottomspace = bottomspace
Frame.__init__(self,master)
self.border = Frame(self,relief='solid',bd=1)
self.border.grid(row=0,column=0)
self.container = Canvas(self.border,width=width,height=height,bg=bg,scrollregion=(0,0,0,height),highlightthickness=0)
self.container.pack()
self.scrollbarY = Scrollbar(self,orient=VERTICAL,command=self.scrollY)
self.scrollbarY.grid(row=0,column=1,sticky=NS)
self.container['yscrollcommand'] = self.scrollbarY.set
def scrollY(self,mode=None,value=None,units=None):
self.container.yview(mode,value,units)
self.container.update()
self.placewidget()
def refresh(self):
try:
if len(self.container.winfo_children()) > len(self.Y): # a widget has been added
self.Y.append([self.Y[-1][0]+self.Y[-1][1]+self.pady,self.container.winfo_children()[-1].winfo_reqheight()])
elif len(self.container.winfo_children()) < len(self.Y): # one or more widgets has been deleted
while len(self.container.winfo_children()) < len(self.Y):
self.Y.remove(self.Y[-1])
except: # this is the very first widget to be added
self.Y = []
self.Y.append([self.pady+self.topspace,self.container.winfo_children()[0].winfo_reqheight()])
self.container.configure(scrollregion=(0,0,0,self.Y[-1][0]+self.Y[-1][1]+self.pady+self.bottomspace))
self.container.yview('moveto',1)
self.container.update()
self.placewidget()
def placewidget(self):
self.container.yview('moveto',self.scrollbarY.get()[0])
Ymodificator = self.scrollbarY.get()[0]*float(self.container['scrollregion'].split()[3])
for i in range(len(self.container.winfo_children())):
self.container.winfo_children()[i].place(relx=0.5,y=self.Y[i][0]-Ymodificator,anchor=N)
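# A minimal usage sketch (assumption: child widgets are created with the
# internal canvas 'container' as their parent and refresh() is called after
# each addition, as the refresh() bookkeeping above expects):
if __name__ == '__main__':
    root = Tk()
    swl = ScrolledWidgetList(root, width=200, height=150)
    swl.pack()
    for i in range(12):
        Label(swl.container, text='item %d' % i)
        swl.refresh()
    root.mainloop()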
|
gpl-3.0
|
ryfeus/lambda-packs
|
Pandas_numpy/source/numpy/core/_internal.py
|
3
|
21639
|
"""
A place for code to be called from core C-code.
Some things are more easily handled Python.
"""
from __future__ import division, absolute_import, print_function
import re
import sys
from numpy.compat import basestring
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
from .numerictypes import object_
if (sys.byteorder == 'little'):
_nbo = b'<'
else:
_nbo = b'>'
def _makenames_list(adict, align):
allfields = []
fnames = list(adict.keys())
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2, 3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', '|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', '|V%d' % num))
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(br'(?P<order1>[<>|=]?)'
br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
br'(?P<order2>[<>|=]?)'
br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(br'\s*,\s*')
space_re = re.compile(br'\s+$')
# astr is a string (perhaps comma separated)
_convorder = {b'=': _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError('format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == b'':
order = order1
elif order1 == b'':
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in [b'|', b'=', _nbo]:
order = b''
dtype = order + dtype
if (repeats == b''):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
class dummy_ctype(object):
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
if ctypes:
self._ctypes = ctypes
else:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
return self.shape_as(_getintp_ctype())
def get_strides(self):
return self.strides_as(_getintp_ctype())
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
def _newnames(datatype, order):
"""
Given a datatype and an order object, return a new names tuple, with the
order indicated
"""
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError("duplicate field name: %s" % (name,))
else:
raise ValueError("unknown field name: %s" % (name,))
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec):
class Stream(object):
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
__nonzero__ = __bool__
stream = Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = dict(
names=[],
formats=[],
offsets=[],
itemsize=0
)
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = 'f{}'.format(j)
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
# Exception used in shares_memory()
class TooHardError(RuntimeError):
pass
class AxisError(ValueError, IndexError):
""" Axis supplied was invalid. """
def __init__(self, axis, ndim=None, msg_prefix=None):
# single-argument form just delegates to base class
if ndim is None and msg_prefix is None:
msg = axis
# do the string formatting here, to save work in the C code
else:
msg = ("axis {} is out of bounds for array of dimension {}"
.format(axis, ndim))
if msg_prefix is not None:
msg = "{}: {}".format(msg_prefix, msg)
super(AxisError, self).__init__(msg)
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
['{}={!r}'.format(k, v)
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
'__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
"[, signature"
", extobj]"
)
if ufunc.signature is None:
kwargs = ", where=True" + kwargs
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
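# Illustrative examples of a few helpers above (sketches assuming a
# little-endian build; kept as comments since this module is internal):
#
#   _commastring(b'2f8, i4')              # -> [(b'f8', 2), b'i4']
#   _dtype_from_pep3118('T{<i:x:<f:y:}')  # -> dtype([('x', '<i4'), ('y', '<f4')])
#   _prod((3, 4))                         # -> 12
#   _lcm(4, 6)                            # -> 12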
|
mit
|
Taifxx/xxtrep
|
context.addtolib/resources/lib/ext/base/tags.py
|
1
|
15746
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014 Martijn Kaijser
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## DEFINE TAGS:
#### System param's ...
### Library folder name ...
TAG_PAR_LIB_FOLDER = 'LIB'
### TMP folders names ...
TAG_PAR_TMP = 'TMP'
TAG_PAR_TMPA = 'TMPA'
### Addon ...
TAG_PAR_SCRIPT_ID = 'context.addtolib'
TAG_PAR_SERVICE_PY = 'service.py'
TAG_PAR_ADDON_PY = 'context.py'
TAG_PAR_COLORS_FILE = 'colors'
### Addon folders ...
TAG_PAR_RESFOLDER = 'resources'
TAG_PAR_BSFOLDER = 'bs'
TAG_PAR_SKINSFOLDER = [TAG_PAR_RESFOLDER,'skins']
TAG_PAR_SPLASH_FILE = [TAG_PAR_RESFOLDER, TAG_PAR_BSFOLDER, 'splash.mp4']
### RunScript's ...
TAG_PAR_SERVICE = 'special://home/addons/%s/%s' % (TAG_PAR_SCRIPT_ID, TAG_PAR_SERVICE_PY)
TAG_PAR_ADDON = 'special://home/addons/%s/%s' % (TAG_PAR_SCRIPT_ID, TAG_PAR_ADDON_PY)
### Strinsg XML (as default) ...
TAG_PAR_STRINGSXML_PATH = [TAG_PAR_RESFOLDER,'language','english']
TAG_PAR_STRINGSXML_FILE = 'strings.xml'
### Dropbox API ...
TAG_PAR_DROPBOX_LF = 'synclock'
TAG_PAR_DROPBOX_LCODE = 'XX000000'
TAG_PAR_DROPBOX_SYNC_FILE = 'vdbsync'
TAG_PAR_DROPBOX_SYNC_T_FILE = 'vdbsync.tmp'
TAG_PAR_DROPBOX_LI_FILE = 'libimg'
TAG_PAR_DROPBOX_LI_T_FILE = 'libimg.tmp'
TAG_PAR_DROPBOX_LI_S_FILE = 'libimg.sync'
TAG_PAR_DROPBOX_CORR_FILE = 'corruption'
TAG_PAR_DROPBOX_UID_FILE = 'uid'
TAG_PAR_DROPBOX_SYNC_T_DIR = 'SYNC_TMP'
TAG_PAR_DROPBOX_PATH = [TAG_PAR_RESFOLDER,'lib','dropbox']
TAG_PAR_DBXACCESSTOKEN_FILE = 'dropbox_access_token'
TAG_PAR_DROPBOX_LISEPREC = '\n'
TAG_PAR_DROPBOX_LISEPTM = '<**DBXTM**>'
TAG_PAR_DROPBOX_MSGSEP = '#'
TAG_PAR_DBXAPPKEY = 'cxa8c253kvoqbqd'
TAG_PAR_DBXAPPSECRET = 'n7tx9emzji3aqnh'
### Addon work files ...
TAG_PAR_TVSPACK_FILE = 'tvs.pack'
TAG_PAR_TVSRAWFILE = 'tvs.eraw'
TAG_PAR_STL_FILE = 'linktable'
TAG_PAR_FSET_FILE = 'fset'
TAG_PAR_PTYPETABLE_FILE = 'pttable'
### Addon work files (tmp) ...
TAG_PAR_TVSUPD_FILE = 'tvsupd'
TAG_PAR_TVSUPDNOW_FILE = 'updnow'
#TAG_PAR_LOCKF = 'lock'
TAG_PAR_STRARTF = 'lock_started'
#TAG_PAR_STRARTAF = 'act'
TAG_PAR_LAACTT = 'laactt'
TAG_PAR_WS_FILE = 'watchsync'
TAG_PAR_WS_TMP_FILE = 'watchsync.tmp'
### Video extensions ...
TAG_PAR_VIDEOSEXT = ['.avi', '.mpeg', '.wmv', 'asf', '.flv', '.mkv', '.mka', '.mp4', '.m4a', '.aac', '.ogg', '.ogm', '.ram', '.rm', '.rv', '.ra', '.rmvb', '.3gp']
### Backup files template ...
TAG_PAR_SYSFLSTMPL = ['.strm', TAG_PAR_TVSPACK_FILE, TAG_PAR_TVSRAWFILE, TAG_PAR_STL_FILE, TAG_PAR_FSET_FILE, TAG_PAR_PTYPETABLE_FILE, TAG_PAR_TVSUPD_FILE, TAG_PAR_TVSUPDNOW_FILE, TAG_PAR_STRARTF, TAG_PAR_DROPBOX_SYNC_FILE, TAG_PAR_DBXACCESSTOKEN_FILE]
TAG_PAR_DROPBOX_TMPL = ['.strm', TAG_PAR_TVSPACK_FILE, TAG_PAR_TVSRAWFILE, TAG_PAR_STL_FILE]
### Default tmpl ...
TAG_PAR_TVSDEFSEASON = '01'
TAG_PAR_SETDEF = 'Default'
TAG_PAR_MNUCOLORFORMAT = '[COLOR %s]%s[/COLOR]'
TAG_PAR_COLORTAG = '##COLOR##'
TAG_PAR_ADDONLABEL_TMPL = '<string id="29999">%s</string>'
TAG_PAR_ADDONLABEL_PATT = TAG_PAR_ADDONLABEL_TMPL % ('(.*)')
TAG_PAR_ADDONLABEL = TAG_PAR_ADDONLABEL_TMPL % ('ADD to [COLOR %s]Lib[/COLOR]')
TAG_PAR_LNPAGE = ' - (%s/%s)'
TAG_PAR_LNSEP = ' > '
TAG_PAR_TTLQ = '%s ( %s ):'
### Zip ...
TAG_PAR_ZIPCN = 'CN'
TAG_PAR_ZIPST = 'atl.backup.'
TAG_PAR_ZIPTMPL = TAG_PAR_ZIPST + '%s.%s.'+ TAG_PAR_ZIPCN + '.zip'
### XML
TAG_PAR_XMLW_SELDLG = 'XDialogSelect.xml'
TAG_PAR_XMLW_SELDLGSUB = 'XDialogSelectSub.xml'
TAG_PAR_XMLW_OKDLG = 'XDialogOk.xml'
TAG_PAR_XMLW_YESNODLG = 'XDialogYesNo.xml'
TAG_PAR_XMLW_RESUMEDLG = 'XDialogResume.xml'
TAG_PAR_XMLW_NOWPLAYDLG = 'XDialogNowPlay.xml'
TAG_PAR_XMLW_DROPBOX = 'Dropbox.xml'
### Help ...
TAG_PAG_HELPXML = 'DialogHelp.xml'
TAG_PAR_HELPFILE = 'help.'
TAG_PAR_HELPPATH = [TAG_PAR_RESFOLDER, 'help']
### Time ...
TAG_PAR_TIMENUMFORMAT = '{:0>2}'
TAG_PAR_TIMESEP = ':'
### URL ...
TAG_PAR_CALLURLTMPL = 'plugin://%s//?#strmtype=#%s&#strmfile=#%s&#strmurl=#'
TAG_PAR_REPFN = '%s'
TAG_PAR_ACTION = 'action='
TAG_PAR_IGNOREST = 'ignorestarted'
### tvs.pack separators ...
TAG_PAR_TVSPACK_LSEP = '<**LSTSEP**>'
TAG_PAR_TVSPACK_SSEP = '<**SRCSEP**>'
TAG_PAR_TVSPACK_FSEP = '<**FRCSEP**>'
TAG_PAR_TVSPACK_ESEP = '<**EPSSEP**>'
TAG_PAR_TVSPACK_PSEP = '<**PRTSEP**>'
TAG_PAR_TVSPACK_VERSEP = '<**VERSIONSEP**>'
TAG_PAR_TVSPACK_VERSION = '10015'
### Containers start with ...
TAG_CON_STARTSW_EXT = 'plugin:'
TAG_CON_STARTSW_VID = 'videodb:'
TAG_CON_STARTSW_PVD = 'playlistvideo:'
#### Const Tags ...
### Default ...
DEFAULT = 10000
### Types ...
TAG_TYP_ALL = 10001
TAG_TYP_MOV = 10002
TAG_TYP_TVS = 10003
TAG_TYP_SRC = 10004
TAG_TYP_FOLDER = 10005
TAG_TYP_PREFILE = 10006
TAG_TYP_FILE = 10007
### Containers ...
TAG_CON_LOCAL = 10071
TAG_CON_EXT = 10072
TAG_CON_VID = 10073
TAG_CON_PVD = 10074
### Conditions ...
TAG_CND_FOUND = 10075
TAG_CND_NOTFOUND = 10076
TAG_CND_LISTEMPTY = 10077
TAG_CND_NEWSRC = 10078
TAG_CND_OLDSRC = 10079
TAG_CND_NOUPD = 10080
TAG_CND_NEWFRC = 10081
TAG_CND_OLDFRC = 10082
TAG_CND_UPDPRC = 10083
TAG_CND_NOUPDPRC = 10084
TAG_CND_NOGL = 10085
TAG_CND_NOACTION = 10086
TAG_CND_PLAY = 10087
TAG_CND_DBXNOAUTH = 10088
TAG_CND_NOTISMOV = 10089
TAG_CND_ISMOV = 10090
### Free actions ...
TAG_ACT_LPRESET = 10200
TAG_ACT_SHADOWUPD = 10201
TAG_ACT_DONOTHING = 10202
TAG_ACT_CHCOLOR = 10203
TAG_ACT_RENAMER = 10204
TAG_ACT_BACKUP = 10205
TAG_ACT_REMBACK = 10206
TAG_ACT_RESTBACK = 10207
TAG_ACT_RESETTBU = 10208
TAG_ACT_AUTOBACKUP = 10209
TAG_ACT_RESKIN = 10210
TAG_ACT_DBXCONNECT = 10211
TAG_ACT_DBXDISCONNECT = 10212
TAG_ACT_SYNC = 10213
TAG_ACT_WATCHSYNC = 10214
TAG_ACT_STOPSRV = 10215
TAG_ACT_STARTSRV = 10216
#### Strings Tags ...
### Language ...
TAG_LNG_ID = 30000
### Menue ...
TAG_MNU_MOV = 30001
TAG_MNU_TVS = 30002
TAG_MNU_TVSU = 30003
TAG_MNU_OPEN = 30004
TAG_MNU_RESCAN = 30005
TAG_MNU_REMSRC = 30006
TAG_MNU_RESTORE = 30007
TAG_MNU_DELETE = 30008
TAG_MNU_VIDLIBU = 30009
TAG_MNU_CHKNEW = 30010
TAG_MNU_JOIN = 30011
TAG_MNU_TVSREN = 30012
TAG_MNU_SRCREN = 30013
TAG_MNU_UPDMAN = 30014
TAG_MNU_ADDEXIST = 30015
TAG_MNU_ADDNEW = 30016
TAG_MNU_SM = 30017
TAG_MNU_SHOWALL = 30018
TAG_MNU_SRCMAN = 30019
TAG_MNU_TVSMAN = 30020
TAG_MNU_QR = 30021
TAG_MNU_QL = 30022
TAG_MNU_NEW = 30023
TAG_MNU_ADDFOL = 30024
TAG_MNU_SRE = 30025
TAG_MNU_UPDFOL = 30026
TAG_MNU_VIDLIBCLN = 30027
TAG_MNU_SHDIR = 30028
TAG_MNU_REBSTL = 30029
TAG_MNU_DEFNMMOV = 30030
TAG_MNU_NEWNMMOV = 30031
TAG_MNU_ATVSNM = 30032
TAG_MNU_ATVSNUMT = 30033
TAG_MNU_ATVSNUM = 30034
TAG_MNU_DEFNM = 30035
TAG_MNU_SEQNUM = 30036
TAG_MNU_SEANUM = 30037
TAG_MNU_STARTADD = 30038
TAG_MNU_ATVS = 30039
TAG_MNU_ATVSSERT = 30040
TAG_MNU_SERDEF = 30041
TAG_MNU_SERTPL = 30042
TAG_MNU_SEASON = 30043
TAG_MNU_RFROM = 30044
TAG_MNU_SFRBEGIN = 30045
TAG_MNU_ADVADD = 30046
TAG_MNU_CHKNEWGL = 30047
TAG_MNU_RESTOREALL = 30048
TAG_MNU_SMM = 30049
TAG_MNU_RAWADD = 30050
TAG_MNU_BRWSREN = 30051
TAG_MNU_CONTUPD = 30052
TAG_MNU_RESCANALLS = 30053
TAG_MNU_RESCANFULL = 30054
TAG_MNU_YES = 30055
TAG_MNU_NO = 30056
TAG_MNU_CLOSEDLG = 30057
TAG_MNU_ADVLSORT = 30058
TAG_MNU_ADVLSORTDOWN = 30059
TAG_MNU_ADVLSORTUP = 30060
TAG_MNU_EPSLISTCORR = 30061
TAG_MNU_NUMBCORR = 30062
TAG_MNU_PBTYPES = 30063
TAG_MNU_DBSYNC = 30064
TAG_MNU_DELMOV = 30065
TAG_MNU_DELTVS = 30066
TAG_MNU_REMARKALL = 30067
TAG_MNU_TVSSTALN = 30068
TAG_MNU_FOLDMODE = 30069
### Static mnu ...
TAG_MNU_MORE = 30090
TAG_MNU_BACKMAIN = 30091
TAG_MNU_OK = 30092
TAG_MNU_HELP = 30096
TAG_MNU_SET = 30097
TAG_MNU_BACK = 30098
TAG_MNU_CANCEL = 30099
### Confirms ...
TAG_CFR_RESCAN = 30071
TAG_CFR_REMSRC = 30072
TAG_CFR_RESTORE = 30073
TAG_CFR_DELETE = 30074
TAG_CFR_TVSREN = 30075
TAG_CFR_JOIN = 30076
TAG_CFR_CLEANVL = 30077
TAG_CFR_DEFNM = 30078
TAG_CFR_RESTOREALL = 30079
TAG_CFR_RESCANALLS = 30080
TAG_CFR_RESCANFULL = 30081
TAG_CFR_RENAMER = 30082
TAG_CFR_UNLOCK = 30083
TAG_CFR_REMBACK = 30084
TAG_CFR_RESTBACK = 30085
TAG_CFR_EXCLPLUG = 30086
### Dialogs messages ...
TAG_DLG_OK = 30100
TAG_DLG_NX = 30101
TAG_DLG_PR = 30102
TAG_DLG_INNM = 30103
TAG_DLG_INSE = 30104
TAG_DLG_NUMSKIP = 30105
TAG_DLG_SUPPRES = 30106
TAG_DLG_PBT1 = 30107
TAG_DLG_PBT2 = 30108
TAG_DLG_PBTAD1 = 30109
TAG_DLG_PBTAD2 = 30110
TAG_DLG_PBTADTIMEO = 30111
TAG_DLG_PBTADTCLAS = 30112
TAG_DLG_PBTADTISP = 30113
TAG_DLG_PBTADTFOLD = 30114
TAG_DLG_PBTT1 = 30115
TAG_DLG_PBTT2 = 30116
TAG_DLG_PBTT3 = 30117
TAG_DLG_PBTT4 = 30118
TAG_DLG_PBTT5 = 30119
TAG_DLG_PBTALT = 30120
TAG_DLG_PBTREM = 30121
TAG_DLG_NPINFO = 30122
TAG_DLG_NPINFRAT = 30123
TAG_DLG_NPINFSRC = 30124
TAG_DLG_NPINFPBT = 30125
TAG_DLG_NPDIRL = 30126
TAG_DLG_PBTTRAN = 30127
TAG_DLG_PBTTRANI = 30128
TAG_DLG_DBXP1 = 30129
TAG_DLG_DBXP2 = 30130
TAG_DLG_DBXP3 = 30131
TAG_DLG_DBXP4 = 30132
TAG_DLG_DBXP5 = 30133
TAG_DLG_DBXPEC = 30134
TAG_DLG_DBXPRGSMSGS = 30135
TAG_DLG_CORR1 = 30136
TAG_DLG_CORR2 = 30137
TAG_DLG_CORR3 = 30138
TAG_DLG_CORR_FORCE = 30139
TAG_DLG_CORR_UNL = 30140
TAG_DLG_MOVIEDEL = 30141
TAG_DLG_TVSDEL = 30142
TAG_DLG_SCLNDB = 30143
TAG_DLG_SREMEF = 30144
TAG_DLG_LOCKSYQ = 30145
TAG_DLG_RENM = 30146
TAG_DLG_CURRTVS = 30147
TAG_DLG_EXCLADDON = 30148
### Titles ...
TAG_TTL_NM = 30150
TAG_TTL_ENTNAME = 30151
TAG_TTL_CHSNAME = 30152
TAG_TTL_ADDTVS = 30153
TAG_TTL_NEWEPS = 30154
TAG_TTL_EXITVS = 30155
TAG_TTL_CHKUPD = 30156
TAG_TTL_ADDMOV = 30157
TAG_TTL_ENTNAMEM = 30158
TAG_TTL_ADVADD = 30159
TAG_TTL_RESTOREALL = 30160
TAG_TTL_CHKUPDGL = 30161
TAG_TTL_POSHLP = 30162
TAG_TTL_CAST = 30163
TAG_TTL_BRWSREN = 30164
TAG_TTL_BRWSRENEP = 30165
TAG_TTL_COLORIZE = 30166
TAG_TTL_SEASON = 30167
TAG_TTL_BACKUP = 30168
TAG_TTL_RESTBACK = 30169
TAG_TTL_RESTLIB = 30170
TAG_TTL_RESTRL = 30171
TAG_TTL_RESTUL = 30172
TAG_TTL_RESTCHK = 30173
TAG_TTL_BCKNM = 30174
TAG_TTL_RESTAT = 30175
TAG_TTL_RESTATC = 30176
TAG_TTL_RESTRTMP = 30177
TAG_TTL_PACK = 30178
TAG_TTL_REMOLDBCK = 30179
TAG_TTL_CLRERRDT = 30180
TAG_TTL_CLRERRD = 30181
TAG_TTL_HELP = 30182
TAG_TTL_MAINMNU = 30183
TAG_TTL_RESKIN = 30184
TAG_TTL_RAWADDEPS = 30185
TAG_TTL_SYNCAUTO = 30186
TAG_TTL_SYNCUP = 30187
TAG_TTL_SYNCDOWN = 30188
TAG_TTL_SYNCUNLOCK = 30189
TAG_TTL_SYNCSENDCH = 30190
TAG_TTL_DBXTTL = 30191
TAG_TTL_DBXOK = 30192
TAG_TTL_DBXCANCEL = 30193
TAG_TTL_DBXCOPY = 30194
TAG_TTL_DBXKEYB = 30195
TAG_TTL_DBXPASTE = 30196
TAG_TTL_DBXOPEN = 30197
TAG_TTL_SVIDDB = 30198
TAG_TTL_SWS = 30199
TAG_TTL_LOCKSY = 30200
### Set ...
TAG_SET_RENAMER = 30436
### Ok messages ...
TAG_ERR_OK = 30301
TAG_ERR_OK_MOVADD = 30302
TAG_ERR_OK_TVSADD = 30303
TAG_ERR_OK_TVSUPD = 30304
TAG_ERR_OK_RESCAN = 30305
TAG_ERR_OK_RESTOR = 30306
TAG_ERR_OK_REMSRC = 30307
TAG_ERR_OK_DELETE = 30308
TAG_ERR_OK_CHKNEW = 30309
TAG_ERR_OK_TVSREN = 30310
TAG_ERR_OK_SRCREN = 30311
TAG_ERR_OK_JOIN = 30312
TAG_ERR_OK_ADDFOL = 30313
TAG_ERR_OK_UPDFOL = 30314
TAG_ERR_OK_SETUPD = 30315
TAG_ERR_OK_VIDLIBU = 30316
TAG_ERR_OK_REBSTL = 30317
TAG_ERR_OK_RESTOREALL = 30318
TAG_ERR_OK_BRWSREN = 30319
TAG_ERR_OK_NEWFRC = 30320
TAG_ERR_OK_RESCANALLS = 30321
TAG_ERR_OK_RESCANFULL = 30322
TAG_ERR_OK_RENAMER = 30323
TAG_ERR_OK_BACKUP = 30324
TAG_ERR_OK_REMBACK = 30325
TAG_ERR_OK_RESTBACK = 30326
TAG_ERR_OK_NOBACK = 30327
TAG_ERR_OK_DBXSMAC = 30328
TAG_ERR_OK_DBXSMDL = 30329
TAG_ERR_OK_DBXSMUP = 30330
TAG_ERR_OK_DBXWSMAC = 30331
TAG_ERR_OK_DBXWSMDL = 30332
TAG_ERR_OK_DBXWSMUP = 30333
TAG_ERR_OK_SYNCUNLOCK = 30334
TAG_ERR_OK_MTVSDEL = 30335
TAG_ERR_OK_SYNCLOCK = 30336
TAG_ERR_OK_EPSREM = 30337
TAG_ERR_OK_EXCLUPLUG = 30338
### Errors ...
TAG_ERR_NOTFILE = 30201
TAG_ERR_INCINPUT = 30202
TAG_ERR_LISTEMPTY = 30203
TAG_ERR_ABORT = 30204
TAG_ERR_NOTOJOIN = 30205
TAG_ERR_DEDLINK = 30206
TAG_ERR_NONAME = 30207
TAG_ERR_NONAME2 = 30208
TAG_ERR_DEFEPS = 30209
TAG_ERR_BROKENLINK = 30210
TAG_ERR_BROKENLINK2 = 30211
TAG_ERR_LIB = 30212
TAG_ERR_LIBACT = 30213
TAG_ERR_LOCK = 30214
TAG_ERR_OL = 30215
TAG_ERR_BADZIP = 30216
TAG_ERR_NOBCKPATH = 30217
TAG_ERR_NOBCKPATHM = 30218
TAG_ERR_INCPBTYPE = 30219
TAG_ERR_NODBXCONNECT = 30220
TAG_ERR_DBXISLOCK = 30221
TAG_ERR_DBXRAISE = 30222
### Other ...
TAG_SET_RUN = 30479
TAG_SET_STOP = 30480
|
gpl-3.0
|
Tset-Noitamotua/_learnpython
|
google-python-class/lesson_123_list_methods.py
|
1
|
1755
|
# -*- coding: utf-8 -*-
# filename: lesson_123_list_methods.py
# Life is short, use Python!
# LIST METHODS
# usage: LIST.METHOD(ARGUMENTs)
# L.append(ELEMENT) ---> append ELEMENT as is at the end of list L
# L.extend('LIST') ---> add elements of LIST at the end of L
# L.insert(INDEX, 'ELEMENT') ---> insert ELEMENT at INDEX e.g. 0 of L
# L.remove('ELEMENT') ---> search and remove ELEMENT from L
# L.pop() ---> remove and return LAST element from L
# L.pop(INDEX) ---> remove and return given INDEX from L
# L.index('ELEMENT') ---> return the INDEX of ELEMENT in L
# NOTE: Common error - the above methods do not RETURN the modified list,
# they just modify it!!!
L = ['larry', 'curly', 'moe']
print(L)
# appends the given list as is to L
L.append(['Tset', 'Noitamotua'])
print(L)
# appends each string and number from given list to L
L.extend(['TESTER', 'QA', 1, 3, 'PASS'])
print(L)
# inserts string PROWSER at index 0 of L
L.insert(0, 'PROWSER')
print(L)
L.remove('larry')
print(L)
# removes last element of L (index (-1) -> PASS) and returns it
L.pop()
print(L)
# removes first element (PROWSER) and returns it
L.pop(0)
print(L)
# get an element's index
print(L.index('curly'))
print(L.index(['Tset', 'Noitamotua']))
print(L.index('TESTER'))
# Example of common mistake mentioned above
try:
a = L.append(99)
print('a ---> ' + str(a))
if a is None:
print('What did I tell you above?!')
print('List methods do NOT return the modified list!')
print("They just modify it! But they don't RETURN it!")
print("That's why a is None!!!")
print('But the list was modified, though:')
print(L)
except:
raise
|
gpl-2.0
|
lipingxue/docker-volume-vsphere
|
esx_service/vmdk_ops.py
|
3
|
83534
|
#!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
ESX-side service handling VMDK requests from VMCI clients
The requests are JSON formatted.
All operations are using requester VM (docker host) datastore and
"Name" in request refers to vmdk basename
VMDK name is formed as [vmdatastore] dockvols/"Name".vmdk
Commands ("cmd" in request):
"create" - create a VMDK in "[vmdatastore] dvol"
"remove" - remove a VMDK. We assume it's not open, and fail if it is
"list" - enumerate VMDKs
"get" - get info about an individual volume (vmdk)
"attach" - attach a VMDK to the requesting VM
"detach" - detach a VMDK from the requesting VM (assuming it's unmounted)
"version" - get the ESX service version string
'''
import atexit
import getopt
import json
import logging
import os
import os.path
import re
import signal
import subprocess
import sys
import traceback
import time
from ctypes import *
from vmware import vsi
import pyVim
from pyVim.connect import Connect, Disconnect
from pyVim import vmconfig
from pyVmomi import VmomiSupport, vim, vmodl
from pyVmomi.VmomiSupport import newestVersions
sys.dont_write_bytecode = True
# Location of utils used by the plugin.
TOP_DIR = "/usr/lib/vmware/vmdkops"
BIN_LOC = os.path.join(TOP_DIR, "bin")
LIB_LOC = os.path.join(TOP_DIR, "lib")
LIB_LOC64 = os.path.join(TOP_DIR, "lib64")
PY_LOC = os.path.join(TOP_DIR, "Python")
PY2_LOC = os.path.join(PY_LOC, "2")
# We won't accept names longer than that
MAX_VOL_NAME_LEN = 100
MAX_DS_NAME_LEN = 100
# Characters not acceptable in volume name
ILLEGAL_CHARACTERS = {'/', '\\'}
# vmdkops python utils are in PY_LOC, so insert to path ahead of other stuff
sys.path.insert(0, PY_LOC)
# if we are on Python 2, add py2-only stuff as a fallback
if sys.version_info.major == 2:
sys.path.append(PY2_LOC)
import threading
import threadutils
import log_config
import volume_kv as kv
import vmdk_utils
import vsan_policy
import vsan_info
import auth
import sqlite3
import convert
import auth_data_const
import auth_api
import error_code
from error_code import ErrorCode
from error_code import error_code_to_message
import vm_listener
import counter
# Python version 3.5.1
PYTHON64_VERSION = 50659824
# External tools used by the plugin.
OBJ_TOOL_CMD = "/usr/lib/vmware/osfs/bin/objtool open -u "
OSFS_MKDIR_CMD = "/usr/lib/vmware/osfs/bin/osfs-mkdir -n "
# Defaults
DOCK_VOLS_DIR = "dockvols" # placed in the same datastore as the Docker VM
MAX_JSON_SIZE = 1024 * 4 # max buf size for query json strings. Queries are limited in size
MAX_SKIP_COUNT = 16 # max retries on VMCI Get Ops failures
VMDK_ADAPTER_TYPE = 'busLogic' # default adapter type
# Server-side understood protocol version. If you are changing the client/server protocol we use
# over VMCI, PLEASE DO NOT FORGET TO CHANGE IT FOR CLIENT in file <esx_vmdkcmd.go> !
SERVER_PROTOCOL_VERSION = 2
# Error codes
VMCI_ERROR = -1 # VMCI C code uses '-1' to indicate failures
ECONNABORTED = 103 # Error on non privileged client
# Volume data returned on Get request
CAPACITY = 'capacity'
SIZE = 'size'
ALLOCATED = 'allocated'
LOCATION = 'datastore'
CREATED_BY_VM = 'created by VM'
ATTACHED_TO_VM = 'attached to VM'
# Virtual machine power states
VM_POWERED_OFF = "poweredOff"
# Maximum number of PVSCSI targets
PVSCSI_MAX_TARGETS = 16
# Service instance provide from connection to local hostd
_service_instance = None
# VMCI library used to communicate with clients
lib = None
# For managing resource locks.
lockManager = threadutils.LockManager()
# Barrier indicating whether stop has been requested
stopBarrier = False
# Counter of operations in flight
opsCounter = counter.OpsCounter()
# Timeout setting for waiting all in-flight ops drained
WAIT_OPS_TIMEOUT = 20
# PCI bus and function number bits and mask, used on the slot number.
PCI_BUS_BITS = 5
PCI_BUS_MASK = 31
PCI_FUNC_BITS = 10
PCI_FUNC_MASK = 7
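# A minimal sketch (hypothetical slot value) of how these masks are applied in
# get_controller_pci_slot() further below:
#   slot_num = 1184
#   bus  = (slot_num >> PCI_BUS_BITS)  & PCI_BUS_MASK   # (1184 >> 5)  & 31 = 5
#   func = (slot_num >> PCI_FUNC_BITS) & PCI_FUNC_MASK  # (1184 >> 10) & 7  = 1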
# Run executable on ESX as needed.
# Returns int with return value, and a string with either stdout (on success) or stderr (on error)
def RunCommand(cmd):
"""RunCommand
Runs command specified by user
@param command to execute
"""
logging.debug("Running cmd %s", cmd)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=True)
o, e = p.communicate()
s = p.returncode
if s != 0:
return (s, e)
return (s, o)
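# Illustrative use of RunCommand() (the command string below is a hypothetical
# example; any shell command string works the same way):
#   rc, out = RunCommand("ls /vmfs/volumes")
#   if rc != 0:
#       logging.warning("Command failed: %s", out)  # on failure 'out' holds stderr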
# returns error, or None for OK
# opts is dictionary of {option: value}.
# for now we care about size and (maybe) policy
def createVMDK(vmdk_path, vm_name, vol_name,
opts={}, vm_uuid=None, tenant_uuid=None, datastore_url=None, vm_datastore_url=None, vm_datastore=None):
logging.info("*** createVMDK: %s opts=%s vm_name=%s vm_uuid=%s tenant_uuid=%s datastore_url=%s",
vmdk_path, opts, vm_name, vm_uuid, tenant_uuid, datastore_url)
if os.path.isfile(vmdk_path):
# We are mostly here due to race or Plugin VMCI retry #1076
msg = "File {0} already exists".format(vmdk_path)
logging.warning(msg)
return err(msg)
try:
validate_opts(opts, vmdk_path)
except ValidationError as e:
return err(e.msg)
if kv.CLONE_FROM in opts:
return cloneVMDK(vm_name=vm_name,
vmdk_path=vmdk_path,
opts=opts,
vm_uuid=vm_uuid,
datastore_url=datastore_url,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if not kv.DISK_ALLOCATION_FORMAT in opts:
disk_format = kv.DEFAULT_ALLOCATION_FORMAT
# Update opts with DISK_ALLOCATION_FORMAT for volume metadata
opts[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
else:
disk_format = kv.VALID_ALLOCATION_FORMATS[opts[kv.DISK_ALLOCATION_FORMAT]]
# VirtualDiskSpec
vdisk_spec = vim.VirtualDiskManager.FileBackedVirtualDiskSpec()
vdisk_spec.adapterType = VMDK_ADAPTER_TYPE
vdisk_spec.diskType = disk_format
if kv.SIZE in opts:
vdisk_spec.capacityKb = convert.convert_to_KB(opts[kv.SIZE])
else:
vdisk_spec.capacityKb = convert.convert_to_KB(kv.DEFAULT_DISK_SIZE)
# Form datastore path from vmdk_path
volume_datastore_path = vmdk_utils.get_datastore_path(vmdk_path)
logging.debug("volume_datastore_path=%s", volume_datastore_path)
si = get_si()
task = si.content.virtualDiskManager.CreateVirtualDisk(
name=volume_datastore_path, spec=vdisk_spec)
try:
wait_for_tasks(si, [task])
except vim.fault.VimFault as ex:
return err("Failed to create volume: {0}".format(ex.msg))
logging.debug("Successfully created %s volume", vmdk_path)
# Handle vsan policy
if kv.VSAN_POLICY_NAME in opts:
# Attempt to set policy to vmdk
# set_policy_to_vmdk() deletes the vmdk if it couldn't set the policy
set_err = set_policy_to_vmdk(vmdk_path=vmdk_path,
opts=opts,
vol_name=vol_name)
if set_err:
return set_err
if not create_kv_store(vm_name, vmdk_path, opts):
msg = "Failed to create metadata kv store for {0}".format(vmdk_path)
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path,
vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
# create succeed, insert the volume information into "volumes" table
if tenant_uuid:
vol_size_in_MB = convert.convert_to_MB(auth.get_vol_size(opts))
auth.add_volume_to_volumes_table(tenant_uuid, datastore_url, vol_name, vol_size_in_MB)
else:
logging.debug(error_code_to_message[ErrorCode.VM_NOT_BELONG_TO_TENANT].format(vm_name))
def cloneVMDK(vm_name, vmdk_path, opts={}, vm_uuid=None, datastore_url=None, vm_datastore_url=None, vm_datastore=None):
logging.info("*** cloneVMDK: %s opts = %s vm_uuid=%s datastore_url=%s vm_datastore_url=%s vm_datastore=%s",
vmdk_path, opts, vm_uuid, datastore_url, vm_datastore_url, vm_datastore)
# Get source volume path for cloning
error_info, tenant_uuid, tenant_name = auth.get_tenant(vm_uuid)
if error_info:
return err(error_info)
try:
src_volume, src_datastore = parse_vol_name(opts[kv.CLONE_FROM])
except ValidationError as ex:
return err(str(ex))
if not src_datastore:
src_datastore_url = datastore_url
src_datastore = vmdk_utils.get_datastore_name(datastore_url)
elif not vmdk_utils.validate_datastore(src_datastore):
return err("Invalid datastore '%s'.\n" \
"Known datastores: %s.\n" \
"Default datastore_url: %s" \
% (src_datastore, ", ".join(get_datastore_names_list()), datastore_url))
else:
src_datastore_url = vmdk_utils.get_datastore_url(src_datastore)
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=src_datastore_url,
datastore=src_datastore,
cmd=auth.CMD_ATTACH,
opts={},
use_default_ds=False,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
errmsg = "Failed to authorize VM: {0}, datastore: {1}".format(error_info, src_datastore)
logging.warning("*** cloneVMDK: %s", errmsg)
return err(errmsg)
src_path, errMsg = get_vol_path(src_datastore, tenant_name)
if src_path is None:
return err("Failed to initialize source volume path {0}: {1}".format(src_path, errMsg))
src_vmdk_path = vmdk_utils.get_vmdk_path(src_path, src_volume)
logging.debug("cloneVMDK: src path=%s vol=%s vmdk_path=%s", src_path, src_volume, src_vmdk_path)
if not os.path.isfile(src_vmdk_path):
return err("Could not find volume for cloning %s" % opts[kv.CLONE_FROM])
# Form datastore path from vmdk_path
dest_vol = vmdk_utils.get_datastore_path(vmdk_path)
source_vol = vmdk_utils.get_datastore_path(src_vmdk_path)
lockname = "{}.{}.{}".format(src_datastore, tenant_name, src_volume)
with lockManager.get_lock(lockname):
# Verify if the source volume is in use.
attached, uuid, attach_as, attached_vm_name = getStatusAttached(src_vmdk_path)
if attached:
log_attached_volume(vmdk_path, uuid, attached_vm_name)
# Reauthorize with size info of the volume being cloned
src_vol_info = kv.get_vol_info(src_vmdk_path)
datastore = vmdk_utils.get_datastore_from_vmdk_path(vmdk_path)
datastore_url = vmdk_utils.get_datastore_url(datastore)
opts["size"] = src_vol_info["size"]
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=datastore_url,
datastore=datastore,
cmd=auth.CMD_CREATE,
opts=opts,
use_default_ds=False,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
return err(error_info)
# Handle the allocation format
if not kv.DISK_ALLOCATION_FORMAT in opts:
disk_format = kv.DEFAULT_ALLOCATION_FORMAT
# Update opts with DISK_ALLOCATION_FORMAT for volume metadata
opts[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
else:
disk_format = kv.VALID_ALLOCATION_FORMATS[opts[kv.DISK_ALLOCATION_FORMAT]]
# VirtualDiskSpec
vdisk_spec = vim.VirtualDiskManager.VirtualDiskSpec()
vdisk_spec.adapterType = VMDK_ADAPTER_TYPE
vdisk_spec.diskType = disk_format
# Clone volume
si = get_si()
task = si.content.virtualDiskManager.CopyVirtualDisk(
sourceName=source_vol, destName=dest_vol, destSpec=vdisk_spec)
try:
wait_for_tasks(si, [task])
except vim.fault.VimFault as ex:
return err("Failed to clone volume: {0}".format(ex.msg))
vol_name = vmdk_utils.strip_vmdk_extension(src_vmdk_path.split("/")[-1])
# Fix up the KV for the destination
if not kv.fixup_kv(src_vmdk_path, vmdk_path):
msg = ("Failed to create volume KV for %s" % vol_name)
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path, vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
# Handle vsan policy
if kv.VSAN_POLICY_NAME in opts:
# Attempt to set policy to vmdk
# set_policy_to_vmdk() deletes the vmdk if it couldn't set the policy
set_err = set_policy_to_vmdk(vmdk_path=vmdk_path,
opts=opts,
vol_name=vol_name)
if set_err:
return set_err
# Update volume meta
vol_meta = kv.getAll(vmdk_path)
vol_meta[kv.CREATED_BY] = vm_name
vol_meta[kv.CREATED] = time.asctime(time.gmtime())
vol_meta[kv.VOL_OPTS][kv.CLONE_FROM] = src_volume
vol_meta[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT] = opts[kv.DISK_ALLOCATION_FORMAT]
if kv.ACCESS in opts:
vol_meta[kv.VOL_OPTS][kv.ACCESS] = opts[kv.ACCESS]
if kv.ATTACH_AS in opts:
vol_meta[kv.VOL_OPTS][kv.ATTACH_AS] = opts[kv.ATTACH_AS]
if not kv.setAll(vmdk_path, vol_meta):
msg = "Failed to create metadata kv store for {0}".format(vmdk_path)
logging.warning(msg)
removeVMDK(vmdk_path)
return err(msg)
def create_kv_store(vm_name, vmdk_path, opts):
""" Create the metadata kv store for a volume """
vol_meta = {kv.STATUS: kv.DETACHED,
kv.VOL_OPTS: opts,
kv.CREATED: time.asctime(time.gmtime()),
kv.CREATED_BY: vm_name}
return kv.create(vmdk_path, vol_meta)
def validate_opts(opts, vmdk_path):
"""
Validate available options. Current options are:
* size - The size of the disk to create
* vsan-policy-name - The name of an existing policy to use
* diskformat - The allocation format of allocated disk
"""
valid_opts = [kv.SIZE, kv.VSAN_POLICY_NAME, kv.DISK_ALLOCATION_FORMAT,
kv.ATTACH_AS, kv.ACCESS, kv.FILESYSTEM_TYPE, kv.CLONE_FROM]
defaults = [kv.DEFAULT_DISK_SIZE, kv.DEFAULT_VSAN_POLICY,\
kv.DEFAULT_ALLOCATION_FORMAT, kv.DEFAULT_ATTACH_AS,\
kv.DEFAULT_ACCESS, kv.DEFAULT_FILESYSTEM_TYPE, kv.DEFAULT_CLONE_FROM]
invalid = frozenset(opts.keys()).difference(valid_opts)
if len(invalid) != 0:
msg = 'Invalid options: {0} \n'.format(list(invalid)) \
+ 'Valid options and defaults: ' \
+ '{0}'.format(list(zip(list(valid_opts), defaults)))
raise ValidationError(msg)
# For validation of clone (in)compatible options
clone = True if kv.CLONE_FROM in opts else False
if kv.SIZE in opts:
validate_size(opts[kv.SIZE], clone)
if kv.VSAN_POLICY_NAME in opts:
validate_vsan_policy_name(opts[kv.VSAN_POLICY_NAME], vmdk_path)
if kv.DISK_ALLOCATION_FORMAT in opts:
validate_disk_allocation_format(opts[kv.DISK_ALLOCATION_FORMAT])
if kv.ATTACH_AS in opts:
validate_attach_as(opts[kv.ATTACH_AS])
if kv.ACCESS in opts:
validate_access(opts[kv.ACCESS])
if kv.FILESYSTEM_TYPE in opts:
validate_fstype(opts[kv.FILESYSTEM_TYPE], clone)
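# Illustrative check (hypothetical opts): a dict such as {"size": "1gb"} passes
# validate_opts(), while a dict containing a key outside valid_opts above raises
# ValidationError listing the valid options and their defaults.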
def validate_size(size, clone=False):
"""
Ensure size is given in a human readable format <int><unit> where int is an
integer and unit is either 'mb', 'gb', or 'tb'. e.g. 22mb
"""
if clone:
raise ValidationError("Cannot define the size for a clone")
if not size.lower().endswith(('mb', 'gb', 'tb')) or not size[:-2].isdigit():
msg = ('Invalid format for size. \n'
'Valid sizes must be of form X[mMgGtT]b where X is an '
'integer. Default = 100mb')
raise ValidationError(msg)
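# Illustrative (hypothetical) inputs for validate_size():
#   "100mb", "2gb", "1tb"  -> accepted
#   "100", "2 gb", "1pb"   -> raise ValidationError
#   any size with clone=True also raises, since a clone inherits its source size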
def validate_vsan_policy_name(policy_name, vmdk_path):
"""
Ensure that the policy file exists
"""
if not vsan_info.is_on_vsan(vmdk_path):
raise ValidationError('Cannot use a VSAN policy on a non-VSAN datastore')
if not vsan_policy.policy_exists(policy_name):
err_msg = 'Policy {0} does not exist.'.format(policy_name)
# If valid policies exist, append their names along with error message
# for available policy names that can be used
avail_policies = vsan_policy.get_policies()
if avail_policies:
avail_msg = ' Available policies are: {0}'.format(list(avail_policies.keys()))
err_msg = err_msg + avail_msg
raise ValidationError(err_msg)
def set_policy_to_vmdk(vmdk_path, opts, vol_name=None):
"""
Set VSAN policy to the vmdk object
If failed, delete the vmdk file and return the error info to be displayed
on client
"""
out = vsan_policy.set_policy_by_name(vmdk_path, opts[kv.VSAN_POLICY_NAME])
if out:
# If policy is incompatible/wrong, return the error and delete the vmdk_path
msg = ("Failed to create volume %s: %s" % (vol_name, out))
logging.warning(msg)
error_info = err(msg)
clean_err = cleanVMDK(vmdk_path=vmdk_path,
vol_name=vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
error_info = error_info + clean_err
return error_info
return None
def validate_disk_allocation_format(alloc_format):
"""
Ensure format is valid.
"""
if not alloc_format in kv.VALID_ALLOCATION_FORMATS :
raise ValidationError("Disk Allocation Format \'{0}\' is not supported."
" Valid options are: {1}.".format(
alloc_format, list(kv.VALID_ALLOCATION_FORMATS)))
def validate_attach_as(attach_type):
"""
Ensure that we recognize the attach type
"""
if not attach_type in kv.ATTACH_AS_TYPES :
raise ValidationError("Attach type '{0}' is not supported."
" Valid options are: {1}".format(attach_type, kv.ATTACH_AS_TYPES))
def validate_access(access_type):
"""
Ensure that we recognize the access type
"""
if not access_type in kv.ACCESS_TYPES :
raise ValidationError("Access type '{0}' is not supported."
" Valid options are: {1}".format(access_type,
kv.ACCESS_TYPES))
def validate_fstype(fstype, clone=False):
"""
Ensure that we don't accept fstype for a clone
"""
if clone:
raise ValidationError("Cannot define the filesystem type for a clone")
# Returns the UUID if the vmdk_path is for a VSAN backed.
def get_vsan_uuid(vmdk_path):
f = open(vmdk_path)
data = f.read()
f.close()
# For now we look for a VSAN URI, later vvol.
exp = re.compile("RW .* VMFS \"vsan:\/\/(.*)\"")
try:
return exp.search(data).group(1)
except:
return None
# Return volume info
def vol_info(vol_meta, vol_size_info, datastore):
vinfo = {CREATED_BY_VM : vol_meta[kv.CREATED_BY],
kv.CREATED : vol_meta[kv.CREATED],
kv.STATUS : vol_meta[kv.STATUS]}
vinfo[CAPACITY] = {}
vinfo[CAPACITY][SIZE] = vol_size_info[SIZE]
vinfo[CAPACITY][ALLOCATED] = vol_size_info[ALLOCATED]
vinfo[LOCATION] = datastore
if kv.ATTACHED_VM_UUID in vol_meta:
vm_name = vm_uuid2name(vol_meta[kv.ATTACHED_VM_UUID])
if vm_name:
vinfo[ATTACHED_TO_VM] = vm_name
elif kv.ATTACHED_VM_NAME in vol_meta:
# If vm name couldn't be retrieved through uuid, use name from KV
vinfo[ATTACHED_TO_VM] = vol_meta[kv.ATTACHED_VM_NAME]
else:
vinfo[ATTACHED_TO_VM] = vol_meta[kv.ATTACHED_VM_UUID]
if kv.ATTACHED_VM_DEV in vol_meta:
vinfo[kv.ATTACHED_VM_DEV] = vol_meta[kv.ATTACHED_VM_DEV]
if kv.VOL_OPTS in vol_meta:
if kv.FILESYSTEM_TYPE in vol_meta[kv.VOL_OPTS]:
vinfo[kv.FILESYSTEM_TYPE] = vol_meta[kv.VOL_OPTS][kv.FILESYSTEM_TYPE]
if kv.VSAN_POLICY_NAME in vol_meta[kv.VOL_OPTS]:
vinfo[kv.VSAN_POLICY_NAME] = vol_meta[kv.VOL_OPTS][kv.VSAN_POLICY_NAME]
if kv.DISK_ALLOCATION_FORMAT in vol_meta[kv.VOL_OPTS]:
vinfo[kv.DISK_ALLOCATION_FORMAT] = vol_meta[kv.VOL_OPTS][kv.DISK_ALLOCATION_FORMAT]
else:
vinfo[kv.DISK_ALLOCATION_FORMAT] = kv.DEFAULT_ALLOCATION_FORMAT
if kv.ATTACH_AS in vol_meta[kv.VOL_OPTS]:
vinfo[kv.ATTACH_AS] = vol_meta[kv.VOL_OPTS][kv.ATTACH_AS]
else:
vinfo[kv.ATTACH_AS] = kv.DEFAULT_ATTACH_AS
if kv.ACCESS in vol_meta[kv.VOL_OPTS]:
vinfo[kv.ACCESS] = vol_meta[kv.VOL_OPTS][kv.ACCESS]
else:
vinfo[kv.ACCESS] = kv.DEFAULT_ACCESS
if kv.CLONE_FROM in vol_meta[kv.VOL_OPTS]:
vinfo[kv.CLONE_FROM] = vol_meta[kv.VOL_OPTS][kv.CLONE_FROM]
else:
vinfo[kv.CLONE_FROM] = kv.DEFAULT_CLONE_FROM
return vinfo
def cleanVMDK(vmdk_path, vol_name=None):
"""
Delete the vmdk file, retrying if the attempt fails.
Invoked as part of the removeVMDK procedure and in
cases requiring deletion of the vmdk file only (when the meta file
hasn't been generated), e.g. an unsuccessful attempt to apply a
vsan policy, or a failure to create metadata for vmdk_path.
"""
logging.info("*** cleanVMDK: %s", vmdk_path)
# Form datastore path from vmdk_path
volume_datastore_path = vmdk_utils.get_datastore_path(vmdk_path)
retry_count = 0
vol_meta = kv.getAll(vmdk_path)
kv.delete(vmdk_path)
while True:
si = get_si()
task = si.content.virtualDiskManager.DeleteVirtualDisk(name=volume_datastore_path)
try:
# Wait for delete, exit loop on success
wait_for_tasks(si, [task])
break
except vim.fault.FileNotFound as ex:
logging.warning("*** removeVMDK: File not found error: %s", ex.msg)
return None
except vim.fault.VimFault as ex:
if retry_count == vmdk_utils.VMDK_RETRY_COUNT or "Error caused by file" not in ex.msg:
kv.create(vmdk_path, vol_meta)
return err("Failed to remove volume: {0}".format(ex.msg))
else:
logging.warning("*** removeVMDK: Retrying removal on error: %s", ex.msg)
vmdk_utils.log_volume_lsof(vol_name)
retry_count += 1
time.sleep(vmdk_utils.VMDK_RETRY_SLEEP)
return None
# Return error, or None for OK
def removeVMDK(vmdk_path, vol_name=None, vm_name=None, tenant_uuid=None, datastore_url=None):
"""
Checks the status of the vmdk file using its meta file
If it is not attached, then cleans(deletes) the vmdk file.
If clean is successful, delete the volume from volume table
"""
logging.info("*** removeVMDK: %s", vmdk_path)
# Check the current volume status
kv_status_attached, kv_uuid, attach_mode, attached_vm_name = getStatusAttached(vmdk_path)
if kv_status_attached:
if vol_name is None:
vol_name = vmdk_utils.get_volname_from_vmdk_path(vmdk_path)
logging.info("*** removeVMDK: %s is in use, volume = %s VM = %s VM-uuid = %s",
vmdk_path, vol_name, attached_vm_name, kv_uuid)
return err("Failed to remove volume {0}, in use by VM = {1}.".format(vol_name, attached_vm_name))
# Cleaning .vmdk file
clean_err = cleanVMDK(vmdk_path, vol_name)
if clean_err:
logging.warning("Failed to clean %s file: %s", vmdk_path, clean_err)
return clean_err
# clean succeeded, remove information of this volume from volumes table
if tenant_uuid:
error_info = auth.remove_volume_from_volumes_table(tenant_uuid, datastore_url, vol_name)
return error_info
elif not vm_name:
logging.debug(error_code_to_message[ErrorCode.VM_NOT_BELONG_TO_TENANT].format(vm_name))
return None
def getVMDK(vmdk_path, vol_name, datastore):
"""Checks if the volume exists, and returns error if it does not"""
# Note: will return more Volume info here, when Docker API actually accepts it
logging.debug("getVMDK: vmdk_path=%s vol_name=%s, datastore=%s", vmdk_path, vol_name, datastore)
file_exist = os.path.isfile(vmdk_path)
logging.debug("getVMDK: file_exist=%d", file_exist)
if not file_exist:
return err("Volume {0} not found (file: {1})".format(vol_name, vmdk_path))
# Return volume info - volume policy, size, allocated capacity, allocation
# type, created-by, create time.
try:
result = vol_info(kv.getAll(vmdk_path),
kv.get_vol_info(vmdk_path),
datastore)
except Exception as ex:
logging.error("Failed to get disk details for %s (%s)" % (vmdk_path, ex))
return None
return result
def listVMDK(tenant):
"""
Returns a list of volume names (note: may be an empty list).
Each volume name is returned as either `volume@datastore`, or just `volume`
for volumes on vm_datastore
"""
vmdk_utils.init_datastoreCache(force=True)
vmdks = vmdk_utils.get_volumes(tenant)
# build fully qualified vol name for each volume found
return [{u'Name': get_full_vol_name(x['filename'], x['datastore']),
u'Attributes': {}} \
for x in vmdks]
def findVmByUuid(vm_uuid, is_vc_uuid=False):
"""
Find VM by vm_uuid.
is_vc_uuid should be true if vm_uuid is vc uuid, else it should be false.
Return VM managed object, reconnect if needed. Throws if connection fails twice.
Returns None if the uuid is not found
"""
si = get_si()
vm = si.content.searchIndex.FindByUuid(None, vm_uuid, True, is_vc_uuid)
return vm
def findVmByUuidChoice(bios_uuid, vc_uuid):
"""
Returns vm object based on either vc_uuid, or bios_uuid.
Returns None if failed to find.
"""
vm = None
if vc_uuid:
vm = findVmByUuid(vc_uuid, True)
if not vm: # either vc_uuid is not even passed, or we failed to find the VM by VC uuid:
if vc_uuid:
logging.info("Failed to find VM by VC UUID %s, trying BIOS UUID %s", vc_uuid, bios_uuid)
vm = findVmByUuid(bios_uuid, False)
if not vm: # can't find VM by VC or BIOS uuid
logging.error("Failed to find VM by BIOS UUID either.")
return None
logging.info("Found vm name='%s'", vm.config.name)
return vm
def vm_uuid2name(vm_uuid):
vm = findVmByUuidChoice(vm_uuid, vm_uuid)
if not vm or not vm.config:
return None
return vm.config.name
def attachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_attach, vmdk_path, vm_name, bios_uuid, vc_uuid)
def detachVMDK(vmdk_path, vm_name, bios_uuid, vc_uuid):
return apply_action_VMDK(disk_detach, vmdk_path, vm_name, bios_uuid, vc_uuid)
def apply_action_VMDK(action, vmdk_path, vm_name, bios_uuid, vc_uuid):
# note: vc_uuid is the last one to avoid reworking tests which use positional args and
# are not aware of vc_uuid
"""Finds the VM and applies action(path,vm_MO) to it.
Returns json reply from action to pass upstairs, or json with 'err'"""
logging.info("*** %s: VMDK %s to VM '%s' , bios uuid = %s, VC uuid=%s)",
action.__name__, vmdk_path, vm_name, bios_uuid, vc_uuid)
vm = findVmByUuidChoice(bios_uuid, vc_uuid)
vcuuid = 'None'
if vc_uuid:
vcuuid = vc_uuid
if not vm: # can't find VM by VC or BIOS uuid
return err("Failed to find VM object for %s (bios %s vc %s)" % (vm_name, bios_uuid, vcuuid))
if vm.config.name != vm_name:
logging.warning("vm_name from vSocket '%s' does not match VM object '%s' ", vm_name, vm.config.name)
return action(vmdk_path, vm)
def get_vol_path(datastore, tenant_name=None, create=True):
"""
Check existence of (and create if needed) the path for docker volume VMDKs
Returns either path to tenant-specific folder (if tenant name is passed)
or path to dockvol.
"""
# If tenant_name is set to None, the folder for Docker
# volumes is created on <datastore>/DOCK_VOLS_DIR
# If tenant_name is set, the folder for Dock volume
# is created on <datastore>/DOCK_VOLS_DIR/tenant_uuid
# a symlink <datastore>/DOCK_VOLS_DIR/tenant_name will be created to point to
# path <datastore>/DOCK_VOLS_DIR/tenant_uuid
# If the dock volume folder already exists,
# the path returned contains tenant name not UUID.
# This is to make logs more readable. OS will resolve this path
# as a symlink with tenant_name will already be present.
readable_path = path = dock_vol_path = os.path.join("/vmfs/volumes", datastore, DOCK_VOLS_DIR)
if tenant_name:
error_info, tenant = auth_api.get_tenant_from_db(tenant_name)
if error_info:
logging.error("get_vol_path: failed to find tenant info for tenant %s", tenant_name)
path = dock_vol_path
path = os.path.join(dock_vol_path, tenant.id)
readable_path = os.path.join(dock_vol_path, tenant_name)
if os.path.isdir(path):
# If the readable_path exists then return, else return path with no symlinks
if os.path.exists(readable_path):
logging.debug("Found %s, returning", readable_path)
return readable_path, None
else:
logging.warning("Internal: Tenant name symlink not found for path %s", readable_path)
logging.debug("Found %s, returning", path)
return path, None
if not create:
# Return the readable path to caller without creating it.
logging.debug("Returning %s, path isn't created yet.", readable_path)
return readable_path, None
if not os.path.isdir(dock_vol_path):
# The osfs tools are usable for DOCK_VOLS_DIR on all datastores.
cmd = "{} '{}'".format(OSFS_MKDIR_CMD, dock_vol_path)
logging.info("Creating %s, running '%s'", dock_vol_path, cmd)
rc, out = RunCommand(cmd)
if rc != 0:
errMsg = "{0} creation failed - {1} on datastore {2}".format(DOCK_VOLS_DIR, os.strerror(rc), datastore)
logging.warning(errMsg)
return None, err(errMsg)
if tenant_name and not os.path.isdir(path):
# The mkdir command is used to create "tenant_name" folder inside DOCK_VOLS_DIR on "datastore"
logging.info("Creating directory %s", path)
try:
os.mkdir(path)
except Exception as ex:
errMsg = "Failed to initialize volume path {} - {}".format(path, ex)
logging.warning(errMsg)
return None, err(errMsg)
# create the symbol link /vmfs/volumes/datastore_name/dockvol/tenant_name
symlink_path = os.path.join(dock_vol_path, tenant_name)
if not os.path.isdir(symlink_path):
os.symlink(path, symlink_path)
logging.info("Symlink %s is created to point to path %s", symlink_path, path)
logging.info("Created %s", path)
return readable_path, None
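# Illustrative layout produced by get_vol_path() (names are placeholders):
#   without a tenant:  /vmfs/volumes/<datastore>/dockvols
#   with a tenant:     /vmfs/volumes/<datastore>/dockvols/<tenant_uuid>
#                      plus a symlink .../dockvols/<tenant_name> -> .../dockvols/<tenant_uuid>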
def parse_vol_name(full_vol_name):
"""
Parses volume[@datastore] and returns (volume, datastore)
On parse errors raises ValidationError with syntax explanation
"""
# Parse volume name with regexp package
try:
at = full_vol_name.rindex('@')
vol_name = full_vol_name[:at]
ds_name = full_vol_name[at + 1:]
except ValueError:
# '@' not found
vol_name = full_vol_name
ds_name = None
# Now block the '-NNNNNN' volume names
#
# Caveat: we block '-NNNNNN' at the end of volume name to make sure that volume
# name never conflicts with VMDK snapshot name (e.g. 'disk-000001.vmdk').
# Note that N is a digit and there are exactly 6 of them (hardcoded in ESXi)
# vmdk_utils.py:list_vmdks() explicitly relies on this assumption.
if re.match(vmdk_utils.SNAP_NAME_REGEXP, vol_name):
raise ValidationError("Volume names ending with '-NNNNNN' (where N is a digit) are not supported")
# Check if the volume name is too long
if len(vol_name) > MAX_VOL_NAME_LEN:
raise ValidationError("Volume name is too long (max len is {0})".format(MAX_VOL_NAME_LEN))
# Check if the volume name contains illegal characters
for c in ILLEGAL_CHARACTERS:
if c in vol_name:
raise ValidationError("Volume name contains illegal characters: {0}".format(c))
# Check if the datastore name is too long
if ds_name:
if len(ds_name) > MAX_DS_NAME_LEN:
raise ValidationError("Datastore name is too long (max len is {0})".format(MAX_DS_NAME_LEN))
# Find case-insensitive match for the datastore
matching_datastores = [d for d in get_datastore_names_list() if d.lower() == ds_name.lower()]
# Return error if more than one datastores found
if len(matching_datastores) > 1:
raise ValidationError("Found multiple datastores with same name (ignoring case difference): {0}".format(matching_datastores))
# Found exactly one match
if len(matching_datastores) == 1:
# On Linux this is a redundant op, but on Windows it corrects the case
ds_name = matching_datastores[0]
# If len(matching_datastores) == 0, it means the ds_name is invalid.
# This will be taken care of by follow-up validation logic.
# Return qualified volume name and datastore name
return vol_name, ds_name
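# Illustrative results (hypothetical names):
#   parse_vol_name("data@datastore1") -> ("data", "datastore1")
#   parse_vol_name("data")            -> ("data", None)
#   parse_vol_name("disk-000001")     -> expected to raise ValidationError (snapshot-like '-NNNNNN' suffix)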
def get_full_vol_name(vmdk_name, datastore):
"""
Forms full volume name from vmdk file name and datastore as volume@datastore
"""
vol_name = vmdk_utils.strip_vmdk_extension(vmdk_name)
return "{0}@{1}".format(vol_name, datastore)
def datastore_path_exist(datastore_name):
""" Check whether path /vmfs/volumes/datastore_name" exist or not """
ds_path = os.path.join("/vmfs/volumes/", datastore_name)
return os.path.exists(ds_path)
def get_datastore_name(datastore_url):
""" Get datastore_name with given datastore_url """
logging.debug("get_datastore_name: datastore_url=%s", datastore_url)
datastore_name = vmdk_utils.get_datastore_name(datastore_url)
if datastore_name is None or not datastore_path_exist(datastore_name):
# path /vmfs/volumes/datastore_name does not exist
# the possible reason is datastore_name which got from
# datastore cache is invalid(old name) need to refresh
# cache, and try again, may still return None
logging.debug("get_datastore_name: datastore_name=%s path to /vmfs/volumes/datastore_name does not exist",
datastore_name)
vmdk_utils.init_datastoreCache(force=True)
datastore_name = vmdk_utils.get_datastore_name(datastore_url)
logging.debug("get_datastore_name: After refresh get datastore_name=%s", datastore_name)
return datastore_name
def authorize_check(vm_uuid, datastore_url, datastore, cmd, opts, use_default_ds, vm_datastore_url, vm_datastore):
"""
Check command from vm can be executed on the datastore or not
Return None on success or error_info if the command cannot be executed
"""
if use_default_ds:
# first check whether it has privilege to default_datastore
# privilege to default_datastore must always exists
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=datastore_url,
vm_datastore_url=vm_datastore_url)
if error_info:
return error_info
else:
# user passed in volume with format vol@datastore
# check the privilege to that datastore
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=datastore_url,
vm_datastore_url=vm_datastore_url)
# no privilege exists for the given datastore
# if the given datastore is the same as vm_datastore
# then we can check privilege against "_VM_DS"
# if no privilege exists for "_VM_DS" or given datastore is not the same
# as vm_datastore, need check against "_ALL_DS"
if error_info == error_code_to_message[ErrorCode.PRIVILEGE_NO_PRIVILEGE]:
if datastore == vm_datastore:
error_info, tenant_uuid, tenant_name = auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=auth_data_const.VM_DS_URL,
vm_datastore_url=vm_datastore_url)
# privilege to "_VM_DS" exists, but authorize fails, return error_info
if error_info != error_code_to_message[ErrorCode.PRIVILEGE_NO_PRIVILEGE]:
return error_info
# privilege to "_VM_DS" does not exists or the given datastore is not the same as
# vm_datastore, check privilege against "_ALL_DS"
error_info, tenant_uuid, tenant_name =auth.authorize(vm_uuid=vm_uuid,
datastore_url=datastore_url,
cmd=cmd,
opts=opts,
privilege_ds_url=auth_data_const.ALL_DS_URL,
vm_datastore_url=vm_datastore_url)
if error_info:
return error_info
return None
# gets the requests, calculates path for volumes, and calls the relevant handler
def executeRequest(vm_uuid, vm_name, config_path, cmd, full_vol_name, opts, vc_uuid=None):
"""
Executes a <cmd> request issued from a VM.
The request is about volume <full_volume_name> in format volume@datastore.
If @datastore is omitted, "default_datastore" will be used if "default_datastore"
is specified for the tenant the VM belongs to;
the one where the VM resides is used if "default_datastore" is not specified.
For VM, the function gets vm_uuid, vm_name and config_path
<opts> is a json options string blindly passed to a specific operation
Returns None (if all OK) or error string
"""
logging.debug("config_path=%s", config_path)
# get datastore the VM is running on
vm_datastore_url = vmdk_utils.get_datastore_url_from_config_path(config_path)
vm_datastore = get_datastore_name(vm_datastore_url)
logging.debug("executeRequest: vm_datastore = %s, vm_datastore_url = %s",
vm_datastore, vm_datastore_url)
error_info, tenant_uuid, tenant_name = auth.get_tenant(vm_uuid)
force_detach = False
if error_info:
# For "docker volume ls", in case of error from the plugin Docker prints a list of cached volume names,
# which is misleading. To avoid this, we replace error with an empty list. See Issue #990 for details.
if (cmd == "list") and (not tenant_uuid):
return []
# We need special handling for failure to find tenants in "detach".
# get_tenant() will fail if the VM was in the default VM group and the latter
# got deleted to tighten security.
# Note: since admin_cli will block removing a VM with attached disks from named groups,
# this fix only impacts "default" vmgroup removal. See issue #1441.
elif (cmd =="detach"):
force_detach = True
else:
return err(error_info)
if force_detach:
# Special (ugly) patch for detaching from VMs where we can't find tenant
# (e.g. tenant definition was removed)
# The patch is ugly since the affected code is a bit convoluted and can benefit
# from refactoring.
# The patch does the following: circumvents all the code for authentication and
# vmdk path calculation, and instead find good match in the list of devices actually attached.
logging.warning("executeRequest: FORCE_DETACH vm_uuid=%s, vm_name=%s, full_volume_name=%s",
vm_uuid, vm_name, full_vol_name)
# For detach, we get full volume name from docker so it should always be valid.
try:
vol_name, datastore = parse_vol_name(full_vol_name)
logging.info("vol_name=%s, datastore=%s", vol_name, datastore)
except ValidationError as ex:
return err(str(ex))
# we use tenant name to form a unique lock name, so let's fake it
tenant_name = "__NoSuchTenant__"
# Since we do not know the tenant and thus cannot construct the /vmfs/volumes/<datastore>/dockvols/<tenant>
# let's just look in the attached device for the best match.
vm = findVmByUuidChoice(vm_uuid, vc_uuid)
vmdk_path = vmdk_utils.get_attached_volume_path(vm, vol_name, datastore)
else:
# default_datastore must be set for tenant
error_info, default_datastore_url = auth_api.get_default_datastore_url(tenant_name)
if error_info:
return err(error_info.msg)
elif not default_datastore_url:
err_msg = error_code_to_message[ErrorCode.DS_DEFAULT_NOT_SET].format(tenant_name)
logging.warning(err_msg)
return err(err_msg)
# default_datastore could be a real datastore name or a hard coded one "_VM_DS"
default_datastore = get_datastore_name(default_datastore_url)
vcuuid = 'None'
if vc_uuid:
vcuuid = vc_uuid
logging.debug("executeRequest: vm uuid=%s VC uuid=%s name=%s, tenant_name=%s, default_datastore=%s",
vm_uuid, vcuuid, vm_name, tenant_name, default_datastore)
if cmd == "list":
threadutils.set_thread_name("{0}-nolock-{1}".format(vm_name, cmd))
# if default_datastore is not set, should return error
return listVMDK(tenant_name)
try:
vol_name, datastore = parse_vol_name(full_vol_name)
except ValidationError as ex:
return err(str(ex))
if datastore and not vmdk_utils.validate_datastore(datastore):
return err("Invalid datastore '%s'.\n" \
"Known datastores: %s.\n" \
"Default datastore: %s" \
% (datastore, ", ".join(get_datastore_names_list()), default_datastore))
if not datastore:
datastore_url = default_datastore_url
datastore = default_datastore
use_default_ds = True
else:
datastore_url = vmdk_utils.get_datastore_url(datastore)
use_default_ds = False
logging.debug("executeRequest: vm_uuid=%s, vm_name=%s, tenant_name=%s, tenant_uuid=%s, "
"default_datastore_url=%s datastore_url=%s",
vm_uuid, vm_name, tenant_uuid, tenant_name, default_datastore_url, datastore_url)
error_info = authorize_check(vm_uuid=vm_uuid,
datastore_url=datastore_url,
datastore=datastore,
cmd=cmd,
opts=opts,
use_default_ds=use_default_ds,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
if error_info:
return err(error_info)
# get_vol_path() need to pass in a real datastore name
if datastore == auth_data_const.VM_DS:
datastore = vm_datastore
# set datastore_url to a real datastore_url
# createVMDK() and removeVMDK() need to pass in
# a real datastore_url instead of url of _VM_DS
datastore_url = vm_datastore_url
path, errMsg = get_vol_path(datastore, tenant_name)
logging.debug("executeRequest for tenant %s with path %s", tenant_name, path)
if path is None:
return errMsg
vmdk_path = vmdk_utils.get_vmdk_path(path, vol_name)
# Set up locking for volume operations.
# Lock name defaults to combination of DS,tenant name and vol name
lockname = "{}.{}.{}".format(vm_datastore, tenant_name, vol_name)
# Set thread name to vm_name-lockname
threadutils.set_thread_name("{0}-{1}".format(vm_name, lockname))
# Get a lock for the volume
logging.debug("Trying to acquire lock: %s", lockname)
with lockManager.get_lock(lockname):
logging.debug("Acquired lock: %s", lockname)
if cmd == "get":
response = getVMDK(vmdk_path, vol_name, datastore)
elif cmd == "create":
response = createVMDK(vmdk_path=vmdk_path,
vm_name=vm_name,
vm_uuid=vm_uuid,
vol_name=vol_name,
opts=opts,
tenant_uuid=tenant_uuid,
datastore_url=datastore_url,
vm_datastore_url=vm_datastore_url,
vm_datastore=vm_datastore)
elif cmd == "remove":
response = removeVMDK(vmdk_path=vmdk_path,
vol_name=vol_name,
vm_name=vm_name,
tenant_uuid=tenant_uuid,
datastore_url=datastore_url)
# For attach/detach reconfigure tasks, hold a per vm lock.
elif cmd == "attach":
with lockManager.get_lock(vm_uuid):
response = attachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
elif cmd == "detach":
with lockManager.get_lock(vm_uuid):
response = detachVMDK(vmdk_path=vmdk_path, vm_name=vm_name,
bios_uuid=vm_uuid, vc_uuid=vc_uuid)
else:
return err("Unknown command:" + cmd)
logging.debug("Released lock: %s", lockname)
return response
def connectLocalSi():
'''
Initialize a connection to the local SI
'''
global _service_instance
if not _service_instance:
try:
logging.info("Connecting to the local Service Instance as 'dcui' ")
# Connect to local server as user "dcui" since this is the Admin that does not lose its
# Admin permissions even when the host is in lockdown mode. User "dcui" does not have a
# password - it is used by local application DCUI (Direct Console User Interface)
# Version must be set to access newer features, such as VSAN.
_service_instance = pyVim.connect.Connect(
host='localhost',
user='dcui',
version=newestVersions.Get('vim'))
except Exception as e:
logging.exception("Failed to create the local Service Instance as 'dcui', continuing... : ")
return
# set our ID in the context to be used in requests - so we'll see it in logs
reqCtx = VmomiSupport.GetRequestContext()
reqCtx["realUser"] = 'dvolplug'
atexit.register(pyVim.connect.Disconnect, _service_instance)
def get_si():
'''
Return a connection to the local SI
'''
with lockManager.get_lock('siLock'):
global _service_instance
try:
_service_instance.CurrentTime()
except:
# service_instance is invalid (could be stale)
# reset it to None and try to connect again.
_service_instance = None
connectLocalSi()
return _service_instance
def is_service_available():
"""
Check if connection to hostd service is available
"""
if not get_si():
return False
return True
def get_datastore_names_list():
"""returns names of known datastores"""
return [i[0] for i in vmdk_utils.get_datastores()]
def findDeviceByPath(vmdk_path, vm):
logging.debug("findDeviceByPath: Looking for device {0}".format(vmdk_path))
for d in vm.config.hardware.device:
if type(d) != vim.vm.device.VirtualDisk:
continue
# Disks of all backing have a backing object with a filename attribute.
# The filename identifies the virtual disk by name and can be used
# to match with the given volume name.
# Filename format is as follows:
# "[<datastore name>] <parent-directory>/tenant/<vmdk-descriptor-name>"
logging.debug("d.backing.fileName %s", d.backing.fileName)
ds, disk_path = d.backing.fileName.rsplit("]", 1)
datastore = ds[1:]
backing_disk = disk_path.lstrip()
logging.debug("findDeviceByPath: datastore=%s, backing_disk=%s", datastore, backing_disk)
# Construct the parent dir and vmdk name, resolving
# links if any.
dvol_dir = os.path.dirname(vmdk_path)
datastore_prefix = os.path.realpath(os.path.join("/vmfs/volumes", datastore)) + '/'
real_vol_dir = os.path.realpath(dvol_dir).replace(datastore_prefix, "")
virtual_disk = os.path.join(real_vol_dir, os.path.basename(vmdk_path))
logging.debug("dvol_dir=%s datastore_prefix=%s real_vol_dir=%s", dvol_dir, datastore_prefix,real_vol_dir)
logging.debug("backing_disk=%s virtual_disk=%s", backing_disk, virtual_disk)
if virtual_disk == backing_disk:
logging.debug("findDeviceByPath: MATCH: %s", backing_disk)
return d
return None
# Find the PCI slot number
def get_controller_pci_slot(vm, pvscsi, key_offset):
''' Return PCI slot number of the given PVSCSI controller
Input parameters:
vm: VM configuration
pvscsi: given PVSCSI controller
key_offset: offset from the bus number, controller_key - key_offset
is equal to the slot number of this given PVSCSI controller
'''
if pvscsi.slotInfo:
slot_num = pvscsi.slotInfo.pciSlotNumber
else:
# Slot number is taken from the VM config.
key = 'scsi{0}.pciSlotNumber'.format(pvscsi.key -
key_offset)
slot = [cfg for cfg in vm.config.extraConfig \
if cfg.key.lower() == key.lower()]
# If the given controller exists
if slot:
slot_num = slot[0].value
else:
return None
# Check if the PCI slot is on the primary or secondary bus
# and find the slot number for the bridge on the secondary
# bus.
orig_slot_num = slot_num
bus = (int(slot_num) >> PCI_BUS_BITS) & PCI_BUS_MASK
func = (int(slot_num) >> PCI_FUNC_BITS) & PCI_FUNC_MASK
while bus > 0:
bus = bus - 1
# Get PCI bridge slot number
key = 'pciBridge{0}.pciSlotNumber'.format(bus)
bridge_slot = [cfg for cfg in vm.config.extraConfig \
if cfg.key.lower() == key.lower()]
if bridge_slot:
slot_num = bridge_slot[0].value
else:
# We didn't find a PCI bridge for this bus.
return None
bus = (int(slot_num) >> PCI_BUS_BITS) & PCI_BUS_MASK
bus_num = '{0}.{1}'.format(hex(int(slot_num))[2:], func)
return [str(orig_slot_num), bus_num]
def dev_info(unit_number, pci_bus_slot_number):
'''Return a dictionary with Unit/Bus for the vmdk (or error)'''
return {'Unit': str(unit_number),
'ControllerPciSlotNumber': pci_bus_slot_number[0],
'ControllerPciBusNumber': pci_bus_slot_number[1]}
def reset_vol_meta(vmdk_path):
'''Clears metadata for vmdk_path'''
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
logging.debug("Reseting meta-data for disk=%s", vmdk_path)
if set(vol_meta.keys()) & {kv.STATUS, kv.ATTACHED_VM_UUID}:
logging.debug("Old meta-data for %s was (status=%s VM uuid=%s)",
vmdk_path, vol_meta[kv.STATUS],
vol_meta[kv.ATTACHED_VM_UUID])
vol_meta[kv.STATUS] = kv.DETACHED
vol_meta[kv.ATTACHED_VM_UUID] = None
vol_meta[kv.ATTACHED_VM_NAME] = None
if not kv.setAll(vmdk_path, vol_meta):
msg = "Failed to save volume metadata for {0}.".format(vmdk_path)
logging.warning("reset_vol_meta: " + msg)
return err(msg)
def setStatusAttached(vmdk_path, vm, vm_dev_info=None):
'''Sets metadata for vmdk_path to (attached, attachedToVM=uuid)'''
logging.debug("Set status=attached disk=%s VM name=%s uuid=%s", vmdk_path,
vm.config.name, vm.config.uuid)
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
vol_meta[kv.STATUS] = kv.ATTACHED
vol_meta[kv.ATTACHED_VM_UUID] = vm.config.instanceUuid
vol_meta[kv.ATTACHED_VM_NAME] = vm.config.name
if vm_dev_info:
vol_meta[kv.ATTACHED_VM_DEV] = vm_dev_info
if not kv.setAll(vmdk_path, vol_meta):
logging.warning("Attach: Failed to save Disk metadata for %s", vmdk_path)
def setStatusDetached(vmdk_path, key=None, value=None):
'''Sets metadata for vmdk_path to "detached"'''
logging.debug("Set status=detached disk=%s", vmdk_path)
vol_meta = kv.getAll(vmdk_path)
if not vol_meta:
vol_meta = {}
vol_meta[kv.STATUS] = kv.DETACHED
# If attachedVMName is present, so is attachedVMUuid
try:
del vol_meta[kv.ATTACHED_VM_UUID]
del vol_meta[kv.ATTACHED_VM_NAME]
del vol_meta[kv.ATTACHED_VM_DEV]
except:
pass
if not kv.setAll(vmdk_path, vol_meta, key, value):
logging.warning("Detach: Failed to save Disk metadata for %s", vmdk_path)
def getStatusAttached(vmdk_path):
'''
Returns (attached, uuid, attach_as, vm_name) tuple. For 'detached' status
uuid and vm_name are None.
'''
vol_meta = kv.getAll(vmdk_path)
try:
attach_as = vol_meta[kv.VOL_OPTS][kv.ATTACH_AS]
except:
attach_as = kv.DEFAULT_ATTACH_AS
if not vol_meta or kv.STATUS not in vol_meta:
return False, None, attach_as, None
attached = (vol_meta[kv.STATUS] == kv.ATTACHED)
try:
uuid = vol_meta[kv.ATTACHED_VM_UUID]
except:
uuid = None
try:
vm_name = vol_meta[kv.ATTACHED_VM_NAME]
except:
vm_name = None
return attached, uuid, attach_as, vm_name
def log_attached_volume(vmdk_path, kv_uuid, vol_name):
'''
Log an appropriate message for a volume that is already attached.
'''
# Treat kv_uuid as vc uuid to find VM
cur_vm = findVmByUuid(kv_uuid, True)
if not cur_vm:
# Prior to #1526, uuid in KV is bios uuid.
logging.info("Using %s as BIOS uuid to find the VM", kv_uuid)
cur_vm = findVmByUuid(kv_uuid, False)
if cur_vm:
msg = "Disk {0} is already attached to VM {1}".format(vmdk_path,
cur_vm.config.name)
else:
msg = "Failed to find VM {0}({1}), disk {2} is already attached".format(vol_name,
kv_uuid,
vmdk_path)
logging.warning(msg)
def add_pvscsi_controller(vm, controllers, max_scsi_controllers, offset_from_bus_number):
'''
Add a new PVSCSI controller, return (controller_key, err) pair
'''
# find empty bus slot for the controller:
taken = set([c.busNumber for c in controllers])
avail = set(range(0, max_scsi_controllers)) - taken
key = avail.pop() # bus slot
controller_key = key + offset_from_bus_number
disk_slot = 0
controller_spec = vim.VirtualDeviceConfigSpec(
operation='add',
device=vim.ParaVirtualSCSIController(key=controller_key,
busNumber=key,
sharedBus='noSharing', ), )
# the device change spec content goes here
pvscsi_change = []
pvscsi_change.append(controller_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = pvscsi_change
try:
si = get_si()
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
except vim.fault.VimFault as ex:
msg=("Failed to add PVSCSI Controller: %s", ex.msg)
return None, err(msg)
logging.debug("Added a PVSCSI controller, controller_id=%d", controller_key)
return controller_key, None
def find_disk_slot_in_controller(vm, devices, pvsci, idx, offset_from_bus_number):
'''
Find an empty disk slot in the given controller, return disk_slot if an empty slot
can be found, otherwise, return None
'''
disk_slot = None
controller_key = pvsci[idx].key
taken = set([dev.unitNumber
for dev in devices
if type(dev) == vim.VirtualDisk and dev.controllerKey ==
controller_key])
# search in 15 slots, with unit_number 7 reserved for scsi controller
avail_slots = (set(range(0, 7)) | set(range(8, PVSCSI_MAX_TARGETS))) - taken
logging.debug("idx=%d controller_key=%d avail_slots=%d", idx, controller_key, len(avail_slots))
if len(avail_slots) != 0:
disk_slot = avail_slots.pop()
pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
offset_from_bus_number)
logging.debug("Find an available slot: controller_key = %d slot = %d", controller_key, disk_slot)
else:
logging.warning("No available slot in this controller: controller_key = %d", controller_key)
return disk_slot
def find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number):
'''
Iterate through all the existing PVSCSI controllers attached to a VM to find an empty
disk slot. Return disk_slot if an empty slot can be found, otherwise return None
'''
idx = 0
disk_slot = None
while ((disk_slot is None) and (idx < len(pvsci))):
disk_slot = find_disk_slot_in_controller(vm, devices, pvsci, idx, offset_from_bus_number)
if (disk_slot is None):
idx = idx + 1
return idx, disk_slot
def disk_attach(vmdk_path, vm):
'''
Attaches *existing* disk to a vm on a PVSCSI controller
(we need PVSCSI to avoid SCSI rescans in the guest)
return error or unit:bus numbers of newly attached disk.
'''
kv_status_attached, kv_uuid, attach_mode, attached_vm_name = getStatusAttached(vmdk_path)
logging.info("Attaching {0} as {1}".format(vmdk_path, attach_mode))
if kv_status_attached:
log_attached_volume(vmdk_path, kv_uuid, attached_vm_name)
# NOTE: vSphere is very picky about unit numbers and controllers of virtual
# disks. Every controller supports 15 virtual disks, and the unit
# numbers need to be unique within the controller and range from
# 0 to 15 with 7 being reserved (for older SCSI controllers).
# It is up to the API client to add controllers as needed.
# SCSI Controller keys are in the range of 1000 to 1003 (1000 + bus_number).
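# Illustrative note (not in the original source): with this scheme a PVSCSI
# controller on bus 0 gets device key 1000 and one on bus 3 gets key 1003,
# and each controller can hold disks at unit numbers 0-6 and 8-15 (unit 7 is
# reserved).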
offset_from_bus_number = 1000
max_scsi_controllers = 4
devices = vm.config.hardware.device
# get all scsi controllers (pvsci, lsi logic, whatever)
controllers = [d for d in devices
if isinstance(d, vim.VirtualSCSIController)]
# Check if this disk is already attached, and if it is - skip the disk
# attach and the checks on attaching a controller if needed.
device = findDeviceByPath(vmdk_path, vm)
if device:
# Disk is already attached.
logging.warning("Disk %s already attached. VM=%s",
vmdk_path, vm.config.uuid)
setStatusAttached(vmdk_path, vm)
# Get that controller to which the device is configured for
pvsci = [d for d in controllers
if type(d) == vim.ParaVirtualSCSIController and
d.key == device.controllerKey]
return dev_info(device.unitNumber,
get_controller_pci_slot(vm, pvsci[0],
offset_from_bus_number))
# Disk isn't attached, make sure we have a PVSCSI controller and add it if we don't
# check if we already have a PVSCSI one
pvsci = [d for d in controllers
if type(d) == vim.ParaVirtualSCSIController]
disk_slot = None
if len(pvsci) > 0:
idx, disk_slot = find_available_disk_slot(vm, devices, pvsci, offset_from_bus_number)
if (disk_slot is not None):
controller_key = pvsci[idx].key
pci_slot_number = get_controller_pci_slot(vm, pvsci[idx],
offset_from_bus_number)
logging.debug("Find an available disk slot, controller_key=%d, slot_id=%d",
controller_key, disk_slot)
if (disk_slot is None):
disk_slot = 0 # starting on a fresh controller
if len(controllers) >= max_scsi_controllers:
msg = "Failed to place new disk - The maximum number of supported volumes has been reached."
logging.error(msg + " VM=%s", vm.config.uuid)
return err(msg)
logging.info("Adding a PVSCSI controller")
controller_key, ret_err = add_pvscsi_controller(vm, controllers, max_scsi_controllers,
offset_from_bus_number)
if (ret_err):
return ret_err
# Find the controller just added
devices = vm.config.hardware.device
pvsci = [d for d in devices
if type(d) == vim.ParaVirtualSCSIController and
d.key == controller_key]
pci_slot_number = get_controller_pci_slot(vm, pvsci[0],
offset_from_bus_number)
logging.info("Added a PVSCSI controller, controller_key=%d pci_slot_number=%s",
controller_key, pci_slot_number[0])
# add disk as independent, so it won't be snapshotted with the Docker VM
disk_spec = vim.VirtualDeviceConfigSpec(
operation='add',
device=
vim.VirtualDisk(backing=vim.VirtualDiskFlatVer2BackingInfo(
fileName="[] " + vmdk_path,
diskMode=attach_mode, ),
deviceInfo=vim.Description(
# TODO: use docker volume name here. Issue #292
label="dockerDataVolume",
summary="dockerDataVolume", ),
unitNumber=disk_slot,
controllerKey=controller_key, ), )
disk_changes = []
disk_changes.append(disk_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = disk_changes
try:
si = get_si()
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
except vim.fault.VimFault as ex:
msg = ex.msg
# Use metadata (KV) for extra logging
if kv_status_attached:
# KV claims we are attached to a different VM.
cur_vm = vm_uuid2name(kv_uuid)
if not cur_vm:
cur_vm = attached_vm_name
msg += " disk {0} already attached to VM={1}".format(vmdk_path,
cur_vm)
if kv_uuid == vm.config.uuid:
msg += "(Current VM)"
return err(msg)
vm_dev_info = dev_info(disk_slot, pci_slot_number)
setStatusAttached(vmdk_path, vm, vm_dev_info)
logging.info("Disk %s successfully attached. controller pci_slot_number=%s, disk_slot=%d",
vmdk_path, pci_slot_number[0], disk_slot)
return vm_dev_info
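# Illustrative note (not in the original source): on success disk_attach()
# returns the dev_info() dict with the keys 'Unit', 'ControllerPciSlotNumber'
# and 'ControllerPciBusNumber'; on failure it returns err(msg), i.e. a dict of
# the form {u'Error': "..."}.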
def err(string):
return {u'Error': string}
def disk_detach(vmdk_path, vm):
"""detach disk (by full path) from a vm and return None or err(msg)"""
device = findDeviceByPath(vmdk_path, vm)
if not device:
# Could happen if the disk is attached to a different VM - attach fails
# and Docker will insist on sending "unmount/detach", which also fails.
# Or Plugin retrying operation due to socket errors #1076
# Return success since disk is anyway not attached
logging.warning("*** Detach disk={0} not found. VM={1}".format(
vmdk_path, vm.config.uuid))
return None
return disk_detach_int(vmdk_path, vm, device)
def disk_detach_int(vmdk_path, vm, device, key=None, value=None):
"""
Disk Detach implementation. We get here after all validations are done,
and here we simply connect to ESX and execute Reconfig("remove disk") task
"""
si = get_si()
spec = vim.vm.ConfigSpec()
dev_changes = []
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
disk_spec.device = device
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
try:
wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])
except vim.fault.VimFault as ex:
ex_type, ex_value, ex_traceback = sys.exc_info()
msg = "Failed to detach %s: %s" % (vmdk_path, ex.msg)
logging.warning("%s\n%s", msg, "".join(traceback.format_tb(ex_traceback)))
return err(msg)
setStatusDetached(vmdk_path, key, value)
logging.info("Disk detached %s", vmdk_path)
return None
# Edit settings for a volume identified by its full path
def set_vol_opts(name, tenant_name, options):
# Create a dict of the options, the options are provided as
# "access=read-only" and we get a dict like {'access': 'read-only'}
opts_list = "".join(options.replace("=", ":").split())
opts = dict(i.split(":") for i in opts_list.split(","))
# create volume path
try:
vol_name, datastore = parse_vol_name(name)
except ValidationError as ex:
logging.exception(ex)
return False
logging.debug("set_vol_opts: name=%s options=%s vol_name=%s, datastore=%s",
name, options, vol_name, datastore)
if not datastore:
msg = "Invalid datastore '{0}'.\n".format(datastore)
logging.warning(msg)
return False
datastore_url = vmdk_utils.get_datastore_url(datastore)
# Trying to set opts on a volume which was created by a non-existent tenant:
# fail the request
if tenant_name:
# if tenant_name is "None", which means the function is called without multi-tenancy
error_info = auth_api.check_tenant_exist(tenant_name)
if not error_info:
logging.warning(error_code_to_message[ErrorCode.TENANT_NOT_EXIST].format(tenant_name))
return False
# get /vmfs/volumes/<datastore_url>/dockvols path on ESX:
path, errMsg = get_vol_path(datastore, tenant_name)
if path is None:
msg = "Failed to get datastore path {0}".format(path)
logging.warning(msg)
return False
vmdk_path = vmdk_utils.get_vmdk_path(path, vol_name)
logging.debug("set_vol_opts: path=%s vmdk_path=%s", path, vmdk_path)
if not os.path.isfile(vmdk_path):
msg = 'Volume {0} not found.'.format(vol_name)
logging.warning(msg)
return False
# For now only allow resetting the access and attach-as options.
valid_opts = {
kv.ACCESS : kv.ACCESS_TYPES,
kv.ATTACH_AS : kv.ATTACH_AS_TYPES
}
invalid = frozenset(opts.keys()).difference(valid_opts.keys())
if len(invalid) != 0:
msg = 'Invalid options: {0} \n'.format(list(invalid)) \
+ 'Options that can be edited: ' \
+ '{0}'.format(list(valid_opts))
raise ValidationError(msg)
has_invalid_opt_value = False
for key in opts.keys():
if key in valid_opts:
if not opts[key] in valid_opts[key]:
msg = 'Invalid option value {0}.\n'.format(opts[key]) +\
'Supported values are {0}.\n'.format(valid_opts[key])
logging.warning(msg)
has_invalid_opt_value = True
if has_invalid_opt_value:
return False
vol_meta = kv.getAll(vmdk_path)
if vol_meta:
if not vol_meta[kv.VOL_OPTS]:
vol_meta[kv.VOL_OPTS] = {}
for key in opts.keys():
vol_meta[kv.VOL_OPTS][key] = opts[key]
return kv.setAll(vmdk_path, vol_meta)
return False
def wait_ops_in_flight():
# Wait for the event indicating all in-flight ops are drained
eventReceived = opsCounter.wait(WAIT_OPS_TIMEOUT)
if (eventReceived):
logging.info("All in-flight operations are completed - exiting")
os.kill(os.getpid(), signal.SIGKILL) # kill the main process
else:
logging.warn("In-flight operations are taking too long to complete - abandoning wait")
def signal_handler_stop(signalnum, frame):
global stopBarrier
logging.warn("Received signal num: %d - exiting", signalnum)
if (opsCounter.value == 0):
logging.info("No in-flight operations - exiting")
sys.exit(0)
# Set the stop barrier to true
logging.debug("Setting stop barrier to true")
stopBarrier = True
# Fire a thread to wait for ops in flight to drain
threadutils.start_new_thread(target=wait_ops_in_flight)
def load_vmci():
global lib
logging.info("Loading VMCI server lib.")
if sys.hexversion >= PYTHON64_VERSION:
lib = CDLL(os.path.join(LIB_LOC64, "libvmci_srv.so"), use_errno=True)
else:
lib = CDLL(os.path.join(LIB_LOC, "libvmci_srv.so"), use_errno=True)
def send_vmci_reply(client_socket, reply_string):
reply = json.dumps(reply_string)
response = lib.vmci_reply(client_socket, c_char_p(reply.encode()))
errno = get_errno()
logging.debug("lib.vmci_reply: VMCI replied with errcode %s", response)
if response == VMCI_ERROR:
logging.warning("vmci_reply returned error %s (errno=%d)",
os.strerror(errno), errno)
def execRequestThread(client_socket, cartel, request):
'''
Execute requests in a thread context with per-volume locking.
'''
# Before we start, block to allow main thread or other running threads to advance.
# https://docs.python.org/2/faq/library.html#none-of-my-threads-seem-to-run-why
time.sleep(0.001)
try:
# Get VM name & ID from VSI (we only get cartelID from vmci, need to convert)
vmm_leader = vsi.get("/userworld/cartel/%s/vmmLeader" % str(cartel))
group_info = vsi.get("/vm/%s/vmmGroupInfo" % vmm_leader)
vm_name = group_info["displayName"]
cfg_path = group_info["cfgPath"]
uuid = group_info["uuid"] # BIOS UUID, see http://www.virtu-al.net/2015/12/04/a-quick-reference-of-vsphere-ids/
vcuuid = group_info["vcUuid"] # VC UUID
# pyVmomi expects uuid like this one: 564dac12-b1a0-f735-0df3-bceb00b30340
# to get it from uuid in VSI vms/<id>/vmmGroup, we use the following format:
UUID_FORMAT = "{0}{1}{2}{3}-{4}{5}-{6}{7}-{8}{9}-{10}{11}{12}{13}{14}{15}"
vm_uuid = UUID_FORMAT.format(*uuid.replace("-", " ").split())
vc_uuid = None
# Use a VC uuid if one is present.
if len(vcuuid) > 0:
vc_uuid = UUID_FORMAT.format(*vcuuid.replace("-", " ").split())
try:
req = json.loads(request.decode('utf-8'))
except ValueError as e:
reply_string = {u'Error': "Failed to parse json '%s'." % request}
send_vmci_reply(client_socket, reply_string)
else:
logging.debug("execRequestThread: req=%s", req)
# If req from client does not include version number, set the version to
# SERVER_PROTOCOL_VERSION by default to make backward compatible
client_protocol_version = int(req["version"]) if "version" in req else SERVER_PROTOCOL_VERSION
logging.debug("execRequestThread: client protocol version=%d", client_protocol_version)
if client_protocol_version != SERVER_PROTOCOL_VERSION:
reply_string = err("""There is a mismatch between VDVS client (Docker plugin) protocol version
({}) and server (ESXi) protocol version ({}) which indicates different
versions of the product are installed on Guest and ESXi sides,
please make sure VDVS plugin and driver are from the same release version.
""".format(client_protocol_version, SERVER_PROTOCOL_VERSION))
send_vmci_reply(client_socket, reply_string)
logging.warning("executeRequest '%s' failed: %s", req["cmd"], reply_string)
return
# If the command is "version" then there is no need to handle the request via
# the normal VM request handler.
if req["cmd"] == "version":
reply_string = {u'version': "%s" % vmdk_utils.get_version()}
else:
opts = req["details"]["Opts"] if "Opts" in req["details"] else {}
reply_string = executeRequest(
vm_uuid=vm_uuid,
vc_uuid=vc_uuid,
vm_name=vm_name,
config_path=cfg_path,
cmd=req["cmd"],
full_vol_name=req["details"]["Name"],
opts=opts)
logging.info("executeRequest '%s' completed with ret=%s", req["cmd"], reply_string)
send_vmci_reply(client_socket, reply_string)
except Exception as ex_thr:
logging.exception("Unhandled Exception:")
reply_string = err("Server returned an error: {0}".format(repr(ex_thr)))
send_vmci_reply(client_socket, reply_string)
finally:
opsCounter.decr()
# code to grab/release VMCI listening socket
g_vmci_listening_socket = None
def vmci_grab_listening_socket(port):
"""call C code to open/bind/listen on the VMCI socket"""
global g_vmci_listening_socket
if g_vmci_listening_socket:
logging.error("VMCI Listening socket - multiple init") # message for us. Should never happen
return
g_vmci_listening_socket = lib.vmci_init(c_uint(port))
if g_vmci_listening_socket == VMCI_ERROR:
errno = get_errno()
raise OSError("Failed to initialize vSocket listener: %s (errno=%d)" \
% (os.strerror(errno), errno))
def vmci_release_listening_socket():
"""Calls C code to release the VMCI listening socket"""
if g_vmci_listening_socket:
lib.vmci_close(g_vmci_listening_socket)
# load VMCI shared lib, listen on vSocket in main loop, handle requests
def handleVmciRequests(port):
skip_count = MAX_SKIP_COUNT # retries for vmci_get_one_op failures
bsize = MAX_JSON_SIZE
txt = create_string_buffer(bsize)
cartel = c_int32()
vmci_grab_listening_socket(port)
while True:
# Listening on VMCI socket
logging.debug("lib.vmci_get_one_op: waiting for new request...")
c = lib.vmci_get_one_op(g_vmci_listening_socket, byref(cartel), txt, c_int(bsize))
logging.debug("lib.vmci_get_one_op returns %d, buffer '%s'", c, txt.value)
errno = get_errno()
if errno == ECONNABORTED:
logging.warn("Client with non privileged port attempted a request")
continue
if c == VMCI_ERROR:
# We can self-correct by reopening sockets internally. Give it a chance.
logging.warning("vmci_get_one_op failed ret=%d: %s (errno=%d) Retrying...",
c, os.strerror(errno), errno)
skip_count = skip_count - 1
if skip_count <= 0:
raise Exception(
"vmci_get_one_op: too many errors. Giving up.")
continue
else:
skip_count = MAX_SKIP_COUNT # reset the counter, just in case
client_socket = c # Bind to avoid race conditions.
# Check the stop barrier - if set, fail new incoming requests and exit the loop
if stopBarrier:
svc_stop_err = 'Service is being stopped: operation declined!'
logging.warning(svc_stop_err)
send_vmci_reply(client_socket, err(svc_stop_err))
continue
if not get_si():
svc_connect_err = 'Service is presently unavailable, ensure the ESXi Host Agent is running on this host'
logging.warning(svc_connect_err)
send_vmci_reply(client_socket, err(svc_connect_err))
continue
opsCounter.incr()
# Fire a thread to execute the request
threadutils.start_new_thread(
target=execRequestThread,
args=(client_socket, cartel.value, txt.value))
# Close listening socket when the loop is over
logging.info("Closing VMCI listening socket...")
vmci_release_listening_socket()
def usage():
print("Usage: %s -p <vSocket Port to listen on>" % sys.argv[0])
def main():
log_config.configure()
logging.info("==== Starting vmdkops service ====")
logging.info("Version: %s , Pid: %d", vmdk_utils.get_version(), os.getpid() )
signal.signal(signal.SIGINT, signal_handler_stop)
signal.signal(signal.SIGTERM, signal_handler_stop)
try:
port = 1019
opts, args = getopt.getopt(sys.argv[1:], 'hp:')
except getopt.error as msg:
if msg:
logging.exception(msg)
usage()
return 1
for a, v in opts:
if a == '-p':
port = int(v)
if a == '-h':
usage()
return 0
try:
# Load and use DLL with vsocket shim to listen for docker requests
load_vmci()
kv.init()
connectLocalSi()
# Start the VM change listener through a daemon thread
threadutils.start_new_thread(target=vm_listener.start_vm_changelistener,
daemon=True)
handleVmciRequests(port)
except Exception as e:
logging.exception(e)
def getTaskList(prop_collector, tasks):
# Create filter
obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[],
all=True)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = obj_specs
filter_spec.propSet = [property_spec]
return prop_collector.CreateFilter(filter_spec, True)
#-----------------------------------------------------------
#
# Support for 'wait for task completion'
# Keep it here to keep a single file for now
#
"""
Written by Michael Rice <[email protected]>
Github: https://github.com/michaelrice
Website: https://michaelrice.github.io/
Blog: http://www.errr-online.com/
This code has been released under the terms of the Apache 2 licenses
http://www.apache.org/licenses/LICENSE-2.0.html
Helper module for task operations.
"""
def wait_for_tasks(si, tasks):
"""Given the service instance si and tasks, it returns after all the
tasks are complete
"""
task_list = [str(task) for task in tasks]
property_collector = si.content.propertyCollector
pcfilter = getTaskList(property_collector, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(task_list):
update = property_collector.WaitForUpdates(version)
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
for change in obj_set.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in task_list:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
task_list.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if pcfilter:
pcfilter.Destroy()
#------------------------
class ValidationError(Exception):
""" An exception for option validation errors """
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
# start the server
if __name__ == "__main__":
# Setting LANG environment variable if it is unset to ensure proper encoding
if os.environ.get('LANG') is None:
os.environ['LANG'] = "en_US.UTF-8"
os.execve(__file__, sys.argv, os.environ)
main()
|
apache-2.0
|
stutivarshney/Bal-Aveksha
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/psycopg2/tests/test_replication.py
|
8
|
7747
|
#!/usr/bin/env python
# test_replication.py - unit test for replication protocol
#
# Copyright (C) 2015 Daniele Varrazzo <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
from psycopg2.extras import (
PhysicalReplicationConnection, LogicalReplicationConnection, StopReplication)
from . import testconfig
from .testutils import unittest, ConnectingTestCase
from .testutils import skip_before_postgres, skip_if_green
skip_repl_if_green = skip_if_green("replication not supported in green mode")
class ReplicationTestCase(ConnectingTestCase):
def setUp(self):
super(ReplicationTestCase, self).setUp()
self.slot = testconfig.repl_slot
self._slots = []
def tearDown(self):
# first close all connections, as they might keep the slot(s) active
super(ReplicationTestCase, self).tearDown()
import time
time.sleep(0.025) # sometimes the slot is still active, wait a little
if self._slots:
kill_conn = self.connect()
if kill_conn:
kill_cur = kill_conn.cursor()
for slot in self._slots:
kill_cur.execute("SELECT pg_drop_replication_slot(%s)", (slot,))
kill_conn.commit()
kill_conn.close()
def create_replication_slot(self, cur, slot_name=testconfig.repl_slot, **kwargs):
cur.create_replication_slot(slot_name, **kwargs)
self._slots.append(slot_name)
def drop_replication_slot(self, cur, slot_name=testconfig.repl_slot):
cur.drop_replication_slot(slot_name)
self._slots.remove(slot_name)
# generate some events for our replication stream
def make_replication_events(self):
conn = self.connect()
if conn is None:
return
cur = conn.cursor()
try:
cur.execute("DROP TABLE dummy1")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute(
"CREATE TABLE dummy1 AS SELECT * FROM generate_series(1, 5) AS id")
conn.commit()
class ReplicationTest(ReplicationTestCase):
@skip_before_postgres(9, 0)
def test_physical_replication_connection(self):
conn = self.repl_connect(connection_factory=PhysicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
cur.execute("IDENTIFY_SYSTEM")
cur.fetchall()
@skip_before_postgres(9, 0)
def test_datestyle(self):
if testconfig.repl_dsn is None:
return self.skipTest("replication tests disabled by default")
conn = self.repl_connect(
dsn=testconfig.repl_dsn, options='-cdatestyle=german',
connection_factory=PhysicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
cur.execute("IDENTIFY_SYSTEM")
cur.fetchall()
@skip_before_postgres(9, 4)
def test_logical_replication_connection(self):
conn = self.repl_connect(connection_factory=LogicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
cur.execute("IDENTIFY_SYSTEM")
cur.fetchall()
@skip_before_postgres(9, 4) # slots require 9.4
def test_create_replication_slot(self):
conn = self.repl_connect(connection_factory=PhysicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
self.create_replication_slot(cur)
self.assertRaises(
psycopg2.ProgrammingError, self.create_replication_slot, cur)
@skip_before_postgres(9, 4) # slots require 9.4
@skip_repl_if_green
def test_start_on_missing_replication_slot(self):
conn = self.repl_connect(connection_factory=PhysicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
self.assertRaises(psycopg2.ProgrammingError,
cur.start_replication, self.slot)
self.create_replication_slot(cur)
cur.start_replication(self.slot)
@skip_before_postgres(9, 4) # slots require 9.4
@skip_repl_if_green
def test_start_and_recover_from_error(self):
conn = self.repl_connect(connection_factory=LogicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
self.create_replication_slot(cur, output_plugin='test_decoding')
# try with invalid options
cur.start_replication(
slot_name=self.slot, options={'invalid_param': 'value'})
def consume(msg):
pass
# we don't see the error from the server before we try to read the data
self.assertRaises(psycopg2.DataError, cur.consume_stream, consume)
# try with correct command
cur.start_replication(slot_name=self.slot)
@skip_before_postgres(9, 4) # slots require 9.4
@skip_repl_if_green
def test_stop_replication(self):
conn = self.repl_connect(connection_factory=LogicalReplicationConnection)
if conn is None:
return
cur = conn.cursor()
self.create_replication_slot(cur, output_plugin='test_decoding')
self.make_replication_events()
cur.start_replication(self.slot)
def consume(msg):
raise StopReplication()
self.assertRaises(StopReplication, cur.consume_stream, consume)
class AsyncReplicationTest(ReplicationTestCase):
@skip_before_postgres(9, 4) # slots require 9.4
@skip_repl_if_green
def test_async_replication(self):
conn = self.repl_connect(
connection_factory=LogicalReplicationConnection, async_=1)
if conn is None:
return
cur = conn.cursor()
self.create_replication_slot(cur, output_plugin='test_decoding')
self.wait(cur)
cur.start_replication(self.slot)
self.wait(cur)
self.make_replication_events()
self.msg_count = 0
def consume(msg):
# just check the methods
"%s: %s" % (cur.io_timestamp, repr(msg))
self.msg_count += 1
if self.msg_count > 3:
cur.send_feedback(reply=True)
raise StopReplication()
cur.send_feedback(flush_lsn=msg.data_start)
# cannot be used in asynchronous mode
self.assertRaises(psycopg2.ProgrammingError, cur.consume_stream, consume)
def process_stream():
from select import select
while True:
msg = cur.read_message()
if msg:
consume(msg)
else:
select([cur], [], [])
self.assertRaises(StopReplication, process_stream)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
|
natea/Miro-Community
|
localtv/inline_edit/urls.py
|
1
|
2444
|
# Copyright 2009 - Participatory Culture Foundation
#
# This file is part of Miro Community.
#
# Miro Community is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Miro Community is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Miro Community. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import patterns
from localtv import models
from localtv.playlists.models import Playlist
urlpatterns = patterns(
'localtv.inline_edit',
(r'^video/(?P<id>[0-9]+)/name/$', 'simple.edit_field',
{'model': models.Video, 'field': 'name'},
'localtv_admin_video_edit_name'),
(r'^video/(?P<id>[0-9]+)/when_published/$', 'simple.edit_field',
{'model': models.Video, 'field': 'when_published'},
'localtv_admin_video_edit_when_published'),
(r'^video/(?P<id>[0-9]+)/authors/$', 'simple.edit_field',
{'model': models.Video, 'field': 'authors'},
'localtv_admin_video_edit_authors'),
(r'^video/(?P<id>[0-9]+)/categories/$', 'simple.edit_field',
{'model': models.Video, 'field': 'categories'},
'localtv_admin_video_edit_categories'),
(r'^video/(?P<id>[0-9]+)/tags/$', 'simple.edit_field',
{'model': models.Video, 'field': 'tags'},
'localtv_admin_video_edit_tags'),
(r'^video/(?P<id>[0-9]+)/description/$', 'simple.edit_field',
{'model': models.Video, 'field': 'description'},
'localtv_admin_video_edit_description'),
(r'^video/(?P<id>[0-9]+)/website_url/$', 'simple.edit_field',
{'model': models.Video, 'field': 'website_url'},
'localtv_admin_video_edit_website_url'),
(r'^video/(?P<id>[0-9]+)/editors_comment/$', 'video_views.editors_comment',
{}, 'localtv_admin_video_edit_editors_comment'),
(r'^video/(?P<id>[0-9]+)/thumbnail/$', 'simple.edit_field',
{'model': models.Video, 'field': 'thumbnail'},
'localtv_admin_video_edit_thumbnail'),
(r'^playlist/([0-9]+)/info/$', 'playlist.info',
{}, 'localtv_admin_playlist_edit_info'),
)
|
agpl-3.0
|
nhicher/ansible
|
lib/ansible/module_utils/mysql.py
|
29
|
3484
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Jonathan Mainguy <[email protected]>, 2015
# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
try:
import pymysql as mysql_driver
except ImportError:
try:
import MySQLdb as mysql_driver
except ImportError:
mysql_driver = None
mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
connect_timeout=30):
config = {}
if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
config['ssl'] = {}
if module.params['login_unix_socket']:
config['unix_socket'] = module.params['login_unix_socket']
else:
config['host'] = module.params['login_host']
config['port'] = module.params['login_port']
if os.path.exists(config_file):
config['read_default_file'] = config_file
# If login_user or login_password are given, they should override the
# config file
if login_user is not None:
config['user'] = login_user
if login_password is not None:
config['passwd'] = login_password
if ssl_cert is not None:
config['ssl']['cert'] = ssl_cert
if ssl_key is not None:
config['ssl']['key'] = ssl_key
if ssl_ca is not None:
config['ssl']['ca'] = ssl_ca
if db is not None:
config['db'] = db
if connect_timeout is not None:
config['connect_timeout'] = connect_timeout
db_connection = mysql_driver.connect(**config)
if cursor_class is not None:
return db_connection.cursor(cursorclass=mysql_driver.cursors.DictCursor)
else:
return db_connection.cursor()
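# A minimal usage sketch (illustrative, not part of the original snippet): a
# module embedding this file would typically pass its own parameters and then
# issue queries through the returned cursor. The parameter names below are
# assumptions about the calling module, not defined here.
#
# cursor = mysql_connect(module,
#                        login_user=module.params['login_user'],
#                        login_password=module.params['login_password'],
#                        config_file=module.params['config_file'],
#                        db='mysql')
# cursor.execute("SELECT VERSION()")
# version = cursor.fetchone()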
|
gpl-3.0
|
andrewcmyers/tensorflow
|
tensorflow/__init__.py
|
81
|
1481
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Bring in all of the public TensorFlow interface into this
# module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python import *
# pylint: enable=wildcard-import
from tensorflow.python.util.lazy_loader import LazyLoader
contrib = LazyLoader('contrib', globals(), 'tensorflow.contrib')
del LazyLoader
del absolute_import
del division
del print_function
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
del python
del core
# pylint: enable=undefined-variable
|
apache-2.0
|
meredith-digops/ansible
|
lib/ansible/modules/system/cron.py
|
24
|
25581
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2012, Dane Summers <[email protected]>
# (c) 2013, Mike Grozak <[email protected]>
# (c) 2013, Patrick Callahan <[email protected]>
# (c) 2015, Evan Kaufman <[email protected]>
# (c) 2015, Luca Berruti <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Cron Plugin: The goal of this plugin is to provide an idempotent method for
# setting up cron jobs on a host. The script will play well with other manually
# entered crons. Each cron job entered will be preceded with a comment
# describing the job so that it can be found later, which is required to be
# present in order for this plugin to find/modify the job.
#
# This module is based on python-crontab by Martin Owens.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = """
---
module: cron
short_description: Manage cron.d and crontab entries.
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- 'When environment variables are managed: no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.'
- 'When using symbols such as %, they must be properly escaped.'
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
Required if state=absent. Note that if name is not set and state=present, then a
new crontab entry will always be created, regardless of existing ones.
default: null
required: false
user:
description:
- The specific user whose crontab should be modified.
required: false
default: root
job:
description:
- The command to execute or, if env is set, the value of environment variable.
Required if state=present.
required: false
aliases: ['value']
default: null
state:
description:
- Whether to ensure the job or environment variable is present or absent.
required: false
default: present
choices: [ "present", "absent" ]
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
If this is a relative path, it is interpreted with respect to
/etc/cron.d. (If it is absolute, it will typically be /etc/crontab).
To use the C(cron_file) parameter you must specify the C(user) as well.
required: false
default: null
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
required: false
choices: [ "yes", "no" ]
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
required: false
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
required: false
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
required: false
default: "*"
aliases: [ "dom" ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
required: false
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
required: false
default: "*"
aliases: [ "dow" ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use special_time.
version_added: "1.0"
required: false
default: "no"
choices: [ "yes", "no" ]
special_time:
description:
- Special time specification nickname.
version_added: "1.3"
required: false
default: null
choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ]
disabled:
description:
- If the job should be disabled (commented out) in the crontab. Only has effect if state=present
version_added: "2.0"
required: false
default: false
env:
description:
- If set, manages a crontab's environment variable. New variables are added on top of crontab.
"name" and "value" parameters are the name and the value of environment variable.
version_added: "2.1"
required: false
default: "no"
choices: [ "yes", "no" ]
insertafter:
description:
- Used with C(state=present) and C(env). If specified, the environment variable will be
inserted after the declaration of specified environment variable.
version_added: "2.1"
required: false
default: null
insertbefore:
description:
- Used with C(state=present) and C(env). If specified, the environment variable will be
inserted before the declaration of specified environment variable.
version_added: "2.1"
required: false
default: null
requirements:
- cron
author:
- "Dane Summers (@dsummersl)"
- 'Mike Grozak'
- 'Patrick Callahan'
- 'Evan Kaufman (@EvanK)'
- 'Luca Berruti (@lberruti)'
"""
EXAMPLES = '''
# Ensure a job that runs at 2 and 5 exists.
# Creates an entry like "0 5,2 * * ls -alh > /dev/null"
- cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
# Ensure an old job is no longer present. Removes any job that is prefixed
# by "#Ansible: an old job" from the crontab
- cron:
name: "an old job"
state: absent
# Creates an entry like "@reboot /some/job.sh"
- cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
# Creates an entry like "PATH=/opt/bin" on top of crontab
- cron:
name: PATH
env: yes
value: /opt/bin
# Creates an entry like "APP_HOME=/srv/app" and insert it after PATH
# declaration
- cron:
name: APP_HOME
env: yes
value: /srv/app
insertafter: PATH
# Creates a cron file under /etc/cron.d
- cron:
name: yum autoupdate
weekday: 2
minute: 0
hour: 12
user: root
job: "YUMINTERACTIVE: 0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
# Removes a cron file from under /etc/cron.d
- cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
# Removes "APP_HOME" environment variable from crontab
- cron:
name: APP_HOME
env: yes
state: absent
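# Illustrative addition (not among the original examples): percent signs are
# special characters in crontab entries and must be escaped as \% in the job
- cron:
    name: "daily timestamp"
    special_time: daily
    job: 'date +\%Y-\%m-\%d >> /var/log/stamp.log'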
'''
import os
import pwd
import re
import tempfile
import platform
import pipes
import sys
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
self.existing = ''
if cron_file:
if os.path.isabs(cron_file):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.existing = f.read()
self.lines = self.existing.splitlines()
f.close()
except IOError:
# cron file does not exist
return
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
self.existing = out
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match( r'# \(/tmp/.*installed on.*\)', l) and
not re.match( r'# \(.*version.*\)', l)):
self.lines.append(l)
else:
pattern = re.escape(l) + '[\r\n]?'
self.existing = re.sub(pattern, '', self.existing, 1)
count += 1
def is_empty(self):
if len(self.lines) == 0:
return True
else:
return False
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
os.chmod(path, int('0644', 8))
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
self.module.set_default_selinux_context(self.cron_file, False)
def do_comment(self, name):
return "%s%s" % (self.ansible, name)
def add_job(self, name, job):
# Add the comment
self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def add_env(self, decl, insertafter=None, insertbefore=None):
if not (insertafter or insertbefore):
self.lines.insert(0, decl)
return
if insertafter:
other_name = insertafter
elif insertbefore:
other_name = insertbefore
other_decl = self.find_env(other_name)
if len(other_decl) > 0:
if insertafter:
index = other_decl[0]+1
elif insertbefore:
index = other_decl[0]
self.lines.insert(index, decl)
return
self.module.fail_json(msg="Variable named '%s' not found." % other_name)
def update_env(self, name, decl):
return self._update_env(name, decl, self.do_add_env)
def do_add_env(self, lines, decl):
lines.append(decl)
def remove_env(self, name):
return self._update_env(name, '', self.do_remove_env)
def do_remove_env(self, lines, decl):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name, job=None):
# attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match( r'%s' % self.ansible, l):
comment = re.sub( r'%s' % self.ansible, '', l)
# failing that, attempt to find job by exact match
if job:
for i, l in enumerate(self.lines):
if l == job:
# if no leading ansible header, insert one
if not re.match( r'%s' % self.ansible, self.lines[i-1]):
self.lines.insert(i, self.do_comment(name))
return [self.lines[i], l, True]
# if a leading blank ansible header AND job has a name, update header
elif name and self.lines[i-1] == self.do_comment(None):
self.lines[i-1] = self.do_comment(name)
return [self.lines[i-1], l, True]
return []
def find_env(self, name):
for index, l in enumerate(self.lines):
if re.match( r'^%s=' % name, l):
return [index, l]
return []
def get_cron_job(self,minute,hour,day,month,weekday,job,special,disabled):
# normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
job = job.strip('\r\n')
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''
if special:
if self.cron_file:
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s%s %s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,self.user,job)
else:
return "%s%s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,job)
return None
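# Illustrative note (not in the original source): with minute='0', hour='5,2',
# the remaining fields left at '*' and no special time, get_cron_job() renders
# "0 5,2 * * * <job>" for a user crontab; disabled=True prefixes the line with
# '#', and a cron_file entry additionally embeds the user name before the command.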
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match( r'%s' % self.ansible, l):
jobnames.append(re.sub( r'%s' % self.ansible, '', l))
return jobnames
def get_envnames(self):
envnames = []
for l in self.lines:
if re.match( r'^\S+=' , l):
envnames.append(l.split('=')[0])
return envnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = self.do_comment(name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def _update_env(self, name, decl, addenvfunction):
newlines = []
for l in self.lines:
if re.match( r'^%s=' % name, l):
addenvfunction(newlines, decl)
else:
newlines.append(l)
self.lines = newlines
def render(self):
"""
Render this crontab as it would be in the crontab.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result:
result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
elif platform.system() == 'AIX':
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
#==================================================
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# - name: sets env
# cron: name="PATH" env=yes value="/bin:/usr/bin"
#
# Would produce:
# PATH=/bin:/usr/bin
# # Ansible: check dirs
# * * 5,2 * * ls -alh > /dev/null
# # Ansible: do the job
# * * 5,2 * * /some/dir/job.sh
module = AnsibleModule(
argument_spec = dict(
name=dict(required=False),
user=dict(required=False),
job=dict(required=False, aliases=['value']),
cron_file=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
backup=dict(default=False, type='bool'),
minute=dict(default='*'),
hour=dict(default='*'),
day=dict(aliases=['dom'], default='*'),
month=dict(default='*'),
weekday=dict(aliases=['dow'], default='*'),
reboot=dict(required=False, default=False, type='bool'),
special_time=dict(required=False,
default=None,
choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"],
type='str'),
disabled=dict(default=False, type='bool'),
env=dict(required=False, type='bool'),
insertafter=dict(required=False),
insertbefore=dict(required=False),
),
supports_check_mode = True,
mutually_exclusive=[
['reboot', 'special_time'],
['insertafter', 'insertbefore'],
]
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
disabled = module.params['disabled']
env = module.params['env']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
do_install = state == 'present'
changed = False
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
crontab = CronTab(module, user, cron_file)
module.debug('cron instantiated - name: "%s"' % name)
if module._diff:
diff = dict()
diff['before'] = crontab.existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
if crontab.user:
diff['before_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['before_header'] = 'crontab'
# --- user input validation ---
if (special_time or reboot) and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
module.fail_json(msg="You must specify time and date fields or special time.")
if cron_file and do_install:
if not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if job is None and do_install:
module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
if (insertafter or insertbefore) and not env and do_install:
module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
if reboot:
special_time = "reboot"
# if requested make a backup before making a change
if backup and not module.check_mode:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not name and not do_install:
if module._diff:
diff['after'] = ''
diff['after_header'] = '/dev/null'
else:
diff = dict()
if module.check_mode:
changed = os.path.isfile(crontab.cron_file)
else:
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state,diff=diff)
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
decl = '%s="%s"' % (name, job)
old_decl = crontab.find_env(name)
if do_install:
if len(old_decl) == 0:
crontab.add_env(decl, insertafter, insertbefore)
changed = True
if len(old_decl) > 0 and old_decl[1] != decl:
crontab.update_env(name, decl)
changed = True
else:
if len(old_decl) > 0:
crontab.remove_env(name)
changed = True
else:
if do_install:
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name, job)
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
if len(old_job) > 2:
crontab.update_job(name, job)
changed = True
else:
old_job = crontab.find_job(name)
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and not crontab.existing == '':
if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
changed = True
res_args = dict(
jobs = crontab.get_jobnames(),
envs = crontab.get_envnames(),
changed = changed
)
if changed:
if not module.check_mode:
crontab.write()
if module._diff:
diff['after'] = crontab.render()
if crontab.cron_file:
diff['after_header'] = crontab.cron_file
else:
if crontab.user:
diff['after_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['after_header'] = 'crontab'
res_args['diff'] = diff
# retain the backup only if crontab or cron file have changed
if backup:
if changed:
res_args['backup_file'] = backup_file
else:
if not module.check_mode:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
mindnervestech/mnrp
|
openerp/__init__.py
|
100
|
3416
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process also has
# its own copy of the data structure, so we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows; under Linux the real import of time seems to be deferred
# long enough that setting the TZ environment variable in
# openerp.cli.server already worked.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name):
"""
Return the model registry for the given database. If the registry does not
exist yet, it is created on the fly.
"""
return modules.registry.RegistryManager.get(database_name)
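# Illustrative usage (a sketch only; the database name below is hypothetical):
#
#     reg = registry('mycompany_db')      # created and cached on first access
#     Users = reg.get('res.users')        # look up a model in that registry
#
# See modules.registry.RegistryManager for the actual caching behaviour.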
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
songmonit/CTTMSONLINE
|
addons/base_report_designer/__init__.py
|
421
|
1136
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import base_report_designer
import installer
import openerp_sxw2rml
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
yingcuhk/LeetCode
|
Algorithms/#303 Range Sum Query - Immutable/PythonCode.py
|
1
|
1082
|
"""
Given an integer array nums, find the sum of the elements between indices i and j (i <= j), inclusive.
Example:
Given nums = [-2, 0, 3, -5, 2, -1]
sumRange(0, 2) -> 1
sumRange(2, 5) -> -1
sumRange(0, 5) -> -3
Note:
You may assume that the array does not change.
There are many calls to the sumRange function.
"""
class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
#self.nums = nums
L = len(nums)
CumSum = [0 for i in xrange(L+1)]
for i in range(1,L+1):
CumSum[i] = CumSum[i-1]+nums[i-1]
#print CumSum
self.CumSum = CumSum
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.CumSum[j+1] - self.CumSum[i]
# Your NumArray object will be instantiated and called as such:
# numArray = NumArray(nums)
# numArray.sumRange(0, 1)
# numArray.sumRange(1, 2)
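# Worked example (a small demo, not part of the original solution) using the
# numbers from the problem statement above: the prefix-sum array CumSum for
# [-2, 0, 3, -5, 2, -1] is [0, -2, -2, 1, -4, -2, -3], so
# sumRange(0, 2) = CumSum[3] - CumSum[0] = 1 - 0 = 1.
if __name__ == '__main__':
    numArray = NumArray([-2, 0, 3, -5, 2, -1])
    print numArray.sumRange(0, 2)  # -> 1
    print numArray.sumRange(2, 5)  # -> -1
    print numArray.sumRange(0, 5)  # -> -3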
|
mit
|
geeknoid/api
|
python/istio_api/mesh/v1alpha1/operator_pb2.py
|
1
|
14150
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mesh/v1alpha1/operator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mesh.v1alpha1 import config_pb2 as mesh_dot_v1alpha1_dot_config__pb2
from mesh.v1alpha1 import component_pb2 as mesh_dot_v1alpha1_dot_component__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mesh/v1alpha1/operator.proto',
package='istio.mesh.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\032istio.io/api/mesh/v1alpha1'),
serialized_pb=_b('\n\x1cmesh/v1alpha1/operator.proto\x12\x13istio.mesh.v1alpha1\x1a\x1amesh/v1alpha1/config.proto\x1a\x1dmesh/v1alpha1/component.proto\"\xca\x06\n\x11IstioOperatorSpec\x12\x0f\n\x07profile\x18\n \x01(\t\x12\x1c\n\x14install_package_path\x18\x0b \x01(\t\x12\x0b\n\x03hub\x18\x0c \x01(\t\x12\x0b\n\x03tag\x18\r \x01(\t\x12\x17\n\x0fresource_suffix\x18\x0e \x01(\t\x12\x34\n\x0bmesh_config\x18( \x01(\x0b\x32\x1f.istio.mesh.v1alpha1.MeshConfig\x12>\n\ncomponents\x18\x32 \x01(\x0b\x32*.istio.mesh.v1alpha1.IstioComponentSetSpec\x12;\n\x06values\x18\x64 \x01(\x0b\x32+.istio.mesh.v1alpha1.TypeMapStringInterface\x12G\n\x12unvalidated_values\x18\x65 \x01(\x0b\x32+.istio.mesh.v1alpha1.TypeMapStringInterface\x12>\n\x06status\x18\xc8\x01 \x01(\x0e\x32-.istio.mesh.v1alpha1.IstioOperatorSpec.Status\x12V\n\x10\x63omponent_status\x18\xc9\x01 \x03(\x0b\x32;.istio.mesh.v1alpha1.IstioOperatorSpec.ComponentStatusEntry\x1a\x85\x01\n\rVersionStatus\x12\x0f\n\x07version\x18\x01 \x01(\t\x12=\n\x06status\x18\x02 \x01(\x0e\x32-.istio.mesh.v1alpha1.IstioOperatorSpec.Status\x12\x15\n\rstatus_string\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\x1al\n\x14\x43omponentStatusEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x43\n\x05value\x18\x02 \x01(\x0b\x32\x34.istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus:\x02\x38\x01\"I\n\x06Status\x12\x08\n\x04NONE\x10\x00\x12\x0c\n\x08UPDATING\x10\x01\x12\x0f\n\x0bRECONCILING\x10\x02\x12\x0b\n\x07HEALTHY\x10\x03\x12\t\n\x05\x45RROR\x10\x04\x42\x1cZ\x1aistio.io/api/mesh/v1alpha1b\x06proto3')
,
dependencies=[mesh_dot_v1alpha1_dot_config__pb2.DESCRIPTOR,mesh_dot_v1alpha1_dot_component__pb2.DESCRIPTOR,])
_ISTIOOPERATORSPEC_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='istio.mesh.v1alpha1.IstioOperatorSpec.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPDATING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RECONCILING', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HEALTHY', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=882,
serialized_end=955,
)
_sym_db.RegisterEnumDescriptor(_ISTIOOPERATORSPEC_STATUS)
_ISTIOOPERATORSPEC_VERSIONSTATUS = _descriptor.Descriptor(
name='VersionStatus',
full_name='istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus.version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus.status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status_string', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus.status_string', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus.error', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=637,
serialized_end=770,
)
_ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY = _descriptor.Descriptor(
name='ComponentStatusEntry',
full_name='istio.mesh.v1alpha1.IstioOperatorSpec.ComponentStatusEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.ComponentStatusEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.ComponentStatusEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=772,
serialized_end=880,
)
_ISTIOOPERATORSPEC = _descriptor.Descriptor(
name='IstioOperatorSpec',
full_name='istio.mesh.v1alpha1.IstioOperatorSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='profile', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.profile', index=0,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='install_package_path', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.install_package_path', index=1,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hub', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.hub', index=2,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.tag', index=3,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_suffix', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.resource_suffix', index=4,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mesh_config', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.mesh_config', index=5,
number=40, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='components', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.components', index=6,
number=50, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='values', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.values', index=7,
number=100, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unvalidated_values', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.unvalidated_values', index=8,
number=101, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.status', index=9,
number=200, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='component_status', full_name='istio.mesh.v1alpha1.IstioOperatorSpec.component_status', index=10,
number=201, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISTIOOPERATORSPEC_VERSIONSTATUS, _ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY, ],
enum_types=[
_ISTIOOPERATORSPEC_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=955,
)
_ISTIOOPERATORSPEC_VERSIONSTATUS.fields_by_name['status'].enum_type = _ISTIOOPERATORSPEC_STATUS
_ISTIOOPERATORSPEC_VERSIONSTATUS.containing_type = _ISTIOOPERATORSPEC
_ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY.fields_by_name['value'].message_type = _ISTIOOPERATORSPEC_VERSIONSTATUS
_ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY.containing_type = _ISTIOOPERATORSPEC
_ISTIOOPERATORSPEC.fields_by_name['mesh_config'].message_type = mesh_dot_v1alpha1_dot_config__pb2._MESHCONFIG
_ISTIOOPERATORSPEC.fields_by_name['components'].message_type = mesh_dot_v1alpha1_dot_component__pb2._ISTIOCOMPONENTSETSPEC
_ISTIOOPERATORSPEC.fields_by_name['values'].message_type = mesh_dot_v1alpha1_dot_component__pb2._TYPEMAPSTRINGINTERFACE
_ISTIOOPERATORSPEC.fields_by_name['unvalidated_values'].message_type = mesh_dot_v1alpha1_dot_component__pb2._TYPEMAPSTRINGINTERFACE
_ISTIOOPERATORSPEC.fields_by_name['status'].enum_type = _ISTIOOPERATORSPEC_STATUS
_ISTIOOPERATORSPEC.fields_by_name['component_status'].message_type = _ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY
_ISTIOOPERATORSPEC_STATUS.containing_type = _ISTIOOPERATORSPEC
DESCRIPTOR.message_types_by_name['IstioOperatorSpec'] = _ISTIOOPERATORSPEC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IstioOperatorSpec = _reflection.GeneratedProtocolMessageType('IstioOperatorSpec', (_message.Message,), {
'VersionStatus' : _reflection.GeneratedProtocolMessageType('VersionStatus', (_message.Message,), {
'DESCRIPTOR' : _ISTIOOPERATORSPEC_VERSIONSTATUS,
'__module__' : 'mesh.v1alpha1.operator_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.IstioOperatorSpec.VersionStatus)
})
,
'ComponentStatusEntry' : _reflection.GeneratedProtocolMessageType('ComponentStatusEntry', (_message.Message,), {
'DESCRIPTOR' : _ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY,
'__module__' : 'mesh.v1alpha1.operator_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.IstioOperatorSpec.ComponentStatusEntry)
})
,
'DESCRIPTOR' : _ISTIOOPERATORSPEC,
'__module__' : 'mesh.v1alpha1.operator_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.IstioOperatorSpec)
})
_sym_db.RegisterMessage(IstioOperatorSpec)
_sym_db.RegisterMessage(IstioOperatorSpec.VersionStatus)
_sym_db.RegisterMessage(IstioOperatorSpec.ComponentStatusEntry)
DESCRIPTOR._options = None
_ISTIOOPERATORSPEC_COMPONENTSTATUSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
apache-2.0
|
vascotenner/holoviews
|
holoviews/plotting/mpl/annotation.py
|
1
|
3913
|
import matplotlib
from matplotlib import patches as patches
from ...core.util import match_spec
from ...core.options import abbreviated_exception
from .element import ElementPlot
class AnnotationPlot(ElementPlot):
"""
AnnotationPlot handles the display of all annotation elements.
"""
def __init__(self, annotation, **params):
self._annotation = annotation
super(AnnotationPlot, self).__init__(annotation, **params)
self.handles['annotations'] = []
def initialize_plot(self, ranges=None):
annotation = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(annotation, ranges)
axis = self.handles['axis']
opts = self.style[self.cyclic_index]
with abbreviated_exception():
handles = self.draw_annotation(axis, annotation.data, opts)
self.handles['annotations'] = handles
return self._finalize_axis(key, ranges=ranges)
def update_handles(self, key, axis, annotation, ranges, style):
# Clear all existing annotations
for element in self.handles['annotations']:
element.remove()
with abbreviated_exception():
self.handles['annotations'] = self.draw_annotation(axis, annotation.data, style)
class VLinePlot(AnnotationPlot):
"Draw a vertical line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, position, opts):
return [axis.axvline(position, **opts)]
class HLinePlot(AnnotationPlot):
"Draw a horizontal line on the axis"
style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, position, opts):
"Draw a horizontal line on the axis"
return [axis.axhline(position, **opts)]
class TextPlot(AnnotationPlot):
"Draw the Text annotation object"
style_opts = ['alpha', 'color', 'family', 'weight', 'rotation', 'fontsize', 'visible']
def draw_annotation(self, axis, data, opts):
(x,y, text, fontsize,
horizontalalignment, verticalalignment, rotation) = data
opts['fontsize'] = fontsize
return [axis.text(x,y, text,
horizontalalignment = horizontalalignment,
verticalalignment = verticalalignment,
rotation=rotation, **opts)]
class ArrowPlot(AnnotationPlot):
"Draw an arrow using the information supplied to the Arrow annotation"
_arrow_style_opts = ['alpha', 'color', 'lw', 'linewidth', 'visible']
_text_style_opts = TextPlot.style_opts
style_opts = sorted(set(_arrow_style_opts + _text_style_opts))
def draw_annotation(self, axis, data, opts):
direction, text, xy, points, arrowstyle = data
arrowprops = dict({'arrowstyle':arrowstyle},
**{k: opts[k] for k in self._arrow_style_opts if k in opts})
textopts = {k: opts[k] for k in self._text_style_opts if k in opts}
if direction in ['v', '^']:
xytext = (0, points if direction=='v' else -points)
elif direction in ['>', '<']:
xytext = (points if direction=='<' else -points, 0)
return [axis.annotate(text, xy=xy, textcoords='offset points',
xytext=xytext, ha="center", va="center",
arrowprops=arrowprops, **textopts)]
class SplinePlot(AnnotationPlot):
"Draw the supplied Spline annotation (see Spline docstring)"
style_opts = ['alpha', 'edgecolor', 'linewidth', 'linestyle', 'visible']
def draw_annotation(self, axis, data, opts):
verts, codes = data
patch = patches.PathPatch(matplotlib.path.Path(verts, codes),
facecolor='none', **opts)
axis.add_patch(patch)
return [patch]
|
bsd-3-clause
|
petebachant/pyqtgraph
|
pyqtgraph/tests/test_ref_cycles.py
|
25
|
2499
|
"""
Test for unwanted reference cycles
"""
import pyqtgraph as pg
import numpy as np
import gc, weakref
import six
import pytest
app = pg.mkQApp()
skipreason = ('unclear why test is failing on python 3. skipping until someone '
'has time to fix it. Or pyside is being used. This test is '
'failing on pyside for an unknown reason too.')
def assert_alldead(refs):
for ref in refs:
assert ref() is None
def qObjectTree(root):
"""Return root and its entire tree of qobject children"""
childs = [root]
for ch in pg.QtCore.QObject.children(root):
childs += qObjectTree(ch)
return childs
def mkrefs(*objs):
"""Return a list of weakrefs to each object in *objs.
QObject instances are expanded to include all child objects.
"""
allObjs = {}
for obj in objs:
if isinstance(obj, pg.QtCore.QObject):
obj = qObjectTree(obj)
else:
obj = [obj]
for o in obj:
allObjs[id(o)] = o
return map(weakref.ref, allObjs.values())
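# Illustrative sketch (not part of the original test suite): mkrefs() expands a
# QObject into its whole child tree before taking weakrefs, so a single call
# covers a widget and everything it owns, and assert_alldead() then checks that
# every referent has been collected once the originals go out of scope.
#
#     refs = mkrefs(pg.QtGui.QWidget(), [1, 2, 3])
#     gc.collect()
#     assert_alldead(refs)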
@pytest.mark.skipif(six.PY3 or pg.Qt.USE_PYSIDE, reason=skipreason)
def test_PlotWidget():
def mkobjs(*args, **kwds):
w = pg.PlotWidget(*args, **kwds)
data = pg.np.array([1,5,2,4,3])
c = w.plot(data, name='stuff')
w.addLegend()
# test that connections do not keep objects alive
w.plotItem.vb.sigRangeChanged.connect(mkrefs)
app.focusChanged.connect(w.plotItem.vb.invertY)
# return weakrefs to a bunch of objects that should die when the scope exits.
return mkrefs(w, c, data, w.plotItem, w.plotItem.vb, w.plotItem.getMenu(), w.plotItem.getAxis('left'))
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.USE_PYSIDE, reason=skipreason)
def test_ImageView():
def mkobjs():
iv = pg.ImageView()
data = np.zeros((10,10,5))
iv.setImage(data)
return mkrefs(iv, iv.imageItem, iv.view, iv.ui.histogram, data)
for i in range(5):
assert_alldead(mkobjs())
@pytest.mark.skipif(six.PY3 or pg.Qt.USE_PYSIDE, reason=skipreason)
def test_GraphicsWindow():
def mkobjs():
w = pg.GraphicsWindow()
p1 = w.addPlot()
v1 = w.addViewBox()
return mkrefs(w, p1, v1)
for i in range(5):
assert_alldead(mkobjs())
if __name__ == '__main__':
    test_PlotWidget()
|
mit
|
gerald-yang/ubuntu-iotivity-demo
|
snappy/grovepi/python-env/lib/python2.7/encodings/euc_jisx0213.py
|
816
|
1051
|
#
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('euc_jisx0213')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='euc_jisx0213',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
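# Example of the codec in use (a sketch; assumes this module is registered via
# the standard 'encodings' search path under the name 'euc_jisx0213', as in a
# normal CPython 2 build):
#
#     u'\u3042'.encode('euc_jisx0213')    # -> '\xa4\xa2' (HIRAGANA LETTER A)
#     '\xa4\xa2'.decode('euc_jisx0213')   # -> u'\u3042'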
|
apache-2.0
|
ronniehd/repository.ronniehd
|
plugin.video.TvGratis/vipracing.py
|
42
|
18950
|
import math
import urllib, urllib2
import re
def vip_unlockmeta(meta):
    # Every third character (indices 0, 3, 6, ...) of the obfuscated string is
    # a filler byte standing in for '%'; restore the '%' signs and
    # percent-decode the result.
    d = ''
    for i in range(0, len(meta)):
        if (i % 3 == 0):
            d += "%"
        else:
            d += meta[i]
    return urllib.unquote(d)
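# get_html() below undoes the page obfuscation: a numeric lookup table is read
# out of the unlocked meta string (its Array(...) literal), each character of
# `data` (offset by ord('0')) indexes into that table, the values are packed
# through a small shift register, and every emitted byte is XORed with 165.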
def get_html(meta,data):
meta_un=vip_unlockmeta(meta)
# print meta_un;
# return
oo=''
x=data
l = len(x)
b = 1024.0
i, j, r, p = 0,0,0,0
s = 0
w = 0
str_pattern='Array\((.*?)\)'
array_val=re.compile(str_pattern).findall(meta_un)[0]
t_string = 't=['+array_val+']'
exec(t_string)
# print t_string
# return
#print math.ceil(l / b)
#print t
for j in range(int(math.ceil(l / b)), 0, -1):
r = '';
# for (i = ath.min(l, b); i > 0; i--, l--):
for i in range( int(min(l, b)),0, -1):
# w |= (t[ ord(x[p]) - 48]) << s;
# print i-1024, p
w |= (t[ ord(x[p]) - 48]) << s;
p+=1;
if (s):
r += chr(165 ^ w & 255);
w >>= 8;
s -= 2
else:
s = 6
l-=1
oo += r
return oo
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link;
def decrypt_vipracing(page_url, justHtml=False,doDecrypt=True,ref=None):
if ref:
headers=[('Referer',ref)]
page_data=getUrl(page_url,headers=headers)
else:
page_data=getUrl(page_url)
url=page_url
if doDecrypt:
str_pattern='src="(.*?(\/embed).*?)"'
url=re.compile(str_pattern).findall(page_data)[0][0]
#print url
meta,data='',''
headers=[('Referer',page_url)]
html=getUrl(url,headers=headers)
str_pattern='\'(http.*?)\''
url=re.compile(str_pattern).findall(html)[0]
html=getUrl(url,headers=headers)
# print html
str_pattern='c=\"(.*?)\"'
meta=re.compile(str_pattern).findall(html)
if len(meta)>0 and len(meta[0])>0 and 'streamer' not in html:
meta=meta[0]
str_pattern='x\(\"(.*?)\"\)'
data=re.compile(str_pattern).findall(html)[0]
#meta="x66p75X6eE63S74j69x6fR6eC20k78r28J78v29r7bu76V61O72I20Q6ct3de78T2eh6cI65O6eZ67b74y68l2cD62e3d@31Z30Z32Z34t2cG69b2cU6af2cc72N2cd70e3dk30K2c_73h3dK30u2cr77M3dw30n2cB74M3dN41p72r72_61a79L28H36j33N2cF34n32N2cW31_35x2cC35K33e2cQ35f34H2ci31F34r2ct34I31b2cj32E39P2cH38z2cQ31B36Y2cR30R2cV30o2cJ30d2cj30n2cz30p2ca30R2c_39e2cI31q31F2cc31q2cj35U35D2cm33R38h2cN31i37_2cx34D35E2cR35T2cf35f32o2cA34h36M2cb33_32_2cs32v35a2ci32T37k2cW32U36g2cA34W37a2cu36V32s2ch32B34P2cB33v30G2cm33Q2c_33n37L2cR37Y2cW34t30J2cW32X38F2cs33J39v2cj35T37U2cw36A31Q2cZ35j39Z2cQ30D2ck30A2cG30_2cD30M2cE33K31@2cI30M2cL31M38h2cM35r31u2cy30E2cH34u2cT32q30J2ch33r34l2cV31e32d2cM31u39n2ch36C2cO33z36e2cI36p30U2cX32E2cf32y33V2cU34M33b2cs35b38J2cX31x30G2cv32u31d2cr33k33K2cD34T34G2cD35C30C2cG32H32_2cX34V39t2ce34L38t2cx33R35f2cm31K33P2cs35k36S29M3bg66a6fz72_28a6aW3da4do61d74o68d2eK63E65n69e6cb28o6cq2ff62C29M3bX6ag3el30Z3bh6an2dv2dN29Q7bC72Q3dw27z27f3bs66y6fL72@28_69K3dN4dh61C74S68Y2ed6dQ69D6eY28n6cs2ct62Y29H3bi69T3ez30w3ba69_2dw2dF2cy6cx2dP2dX29Z7bZ77z7cw3dC28y74Y5bs78r2eY63r68y61L72Y43l6fh64g65f41J74B28u70e2bX2bi29g2dQ34B38l5dJ29U3cZ3cS73g3bi69g66z28q73Q29o7bZ72A2bq3di53S74E72j69s6eK67b2eI66O72C6fC6dF43A68_61N72j43u6fJ64s65v28O31c36o35W5en77_26P32V35@35V29U3bo77U3eu3ek3dg38D3bj73_2d_3dI32v7dG65u6cS73x65r7bC73W3dr36T7dT7dl64e6fe63_75q6df65J6eg74Q2eX77Y72B69n74t65_28E72B29m7d@7d";
#data="ND@r8f8XB_VtpLsbqWgHumPwcTywiTFtmm8vATVrTWstiZVr8fzDND63i_ybqT7rEb73cxXrERVCEnzbd2yvuLk3gm7HTuU3mLgDT1zbun8vB2ywmTybuRgtcNVHAOs3TIKDpMFvmbV3ENXnND63i_ybqT7rEb73cxXrERVCEnzbd2yvuLk3gm7HTIXnstkwG4PwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrERs3ERk3TtPrdfKr9NVwc2gbARVwgOpnQ0pnGuPrGistiRytcNVHApY3gTywypUJgoXnsfYnslVtuRknsoYnsistiRytcNVHApY3gTywypPDg2k3dxgwGMY3ixKOT18b8hswcLFfdTs3TWstiZgwmNP3AAVOTuUHgTVHyxKOT5KSqD0rG4gwgAVbExKOTiKSqD0rLD6fg2k3dxgwLp8xQOpnP0MDpMFvmbV3ENXnND@rrnIXvb_YJmPbExVtGu2kWW5ZDmzr81UftL6Xp1PzvTQrYCQkUWQreIPSGi03dNs3gTybpNgv@nUfJNOrGQPbET73_1UfaAFHApFSA1k3lnPkCnPCyTyt@h6frT_zp47bExVtefPHmhktubVHgnktdWkfBT7wTIXnK4VH8WVr7xVtALyDT4VHEmYWp1UHaAYfaLXfp_Fwpa@WVt6f7CVH8WkrG4yt@0KtdNswPQ8wAxUwT_Pr@hktlxXrcNgfl_krGIXnK4gwdTkDNuPrKfgwEhVryT7Hqx8weRybhxXrinktERktExPHVmywTuUvpNVHcNVHPQPHcC7Hp4VH8WsWGMVbd_F3cTyD9TYw84XrG1zDNuPrKiybEWgwLl5tTRVwGu7tdbywmW6fEbVH@RkDNuPrKfgwEhVrAhgtcxXr5RgCank3BLYrGMstATywATyDTpyvELVbGDgbhRVriCgvANgw@L7fGDgbhRVfGMVbdNktcWVfGM7HmRgv8bktlWPrT_FtdTsvdL7H@u8t9LybiWPrhbVwcns3@uU3qnk3EWPrERVtc2ybubstAWPr8nkHgRs3@uzwpnVHThVt@WPrunsviRk3@uP3@hgCc_YrG1zDNuPrKfgwEhVrAhgtcxXrBRs3i_ybqTybpNkrGMstATywATyDTtQtg2ywAiFtGl5tTRVwGu_tdbywm_PrpIXnsD63i_ybqTYDNtpng2VryiFtqNPtpLgvEbstAm8DPuU3cWkwADstihVHgnktguUCNtpns1UfEnV3ADstihVHgnktGfKrT4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8nzHgRsHpaKSppyvELVb8DgbhRgfJL_YnmPzcmFtmTywuxUvyhktARVt85stmxzwmRgwToXnstp4NtMDpMFvmbV3ENXnsD63i_ybqT7rEb73cxXrERVCEnzbd2yvuLk3gm7HTuU3mLgDT4VHEmYWp18v2hVCApstpAVtchV3gLYfingtpakbdCFf@bkvunzbeRywmbFfeIPjAQ6f2hyHc_yCAfgbANzbu_zDK1U3i_ybqTYDGOMDp4gwdTkDNDXvpTgCG1ktDnktERVCEx5wARyDTQywERY3AmzwdWs3cZXrG1kt4RVtcLVH4TyvmTyDTQywERY3AmzwdWs3cZXrG1ktr_yvlL_Hd_7HPQz3cTyHmNVrRhVtuRsWTuUtALItqbyDTQywERY3AmzwdWs3cZXrG1ktDR7HPQz3cTyHmNVrRhVtuRsWTuU3Eb7tcxXr8hk3lbkt_u6WqhVwBbktl0KSTIXnKigbhmU3Eb7tcxXrqns3gTybpNkWmRVtdTybhRsWERVCEx8v@bswA06vcNVHc_FWabVwECkWhl6SqCFWyRgblCVH_i@Squ7CQQgviZswmngHATkWiuKSqoXrLOpnKigbhm8bBxXrdTsOp2ywmWgvV_PruTyC@RgDTpybBT7b_5@juu7CQ4gwgAVbE0KjeuK37ZK3pLybEbstA0@vTLFt@R7HcZKHpmYWqu7CQQzDNtpnKigbhm8bBxXrdTsOp2ywmWgvVnNvpRYtETstaNkrGM7HVWgwPQUHgTVHy0Xj9MK37ZK3pLybEbstA0@vTLFt@R7HcZKHpmYWhlK37ZKSqCFWERVCEx8v@bswA06vcNVHc_FWinVtp_YWi5kwRZXwpNVH8Myb_RkWeaK37ZXC8tktBRVC_a6WBbs3qWgvV0XtpNgwQQzDDWstuRVrdTs3GtktGD63qhktGtVwPQUvpRYtETstaNkt9xkrLD6fumyvANKruRsvpNVwuNPDpigbhNXnstMDBbkHGtVwPQ8vBnNthRk3@hgCoLstATywATYrGM7HVWgwPQzC8tktBRVC_a6WabVwECkWuuKSqCFWyRgblCVH_Q@jqu7CQuFtubVHgnkt_akvunVt9TywQiFtq0KWqu7CQDgwRTYWepXjqCFWThsv5Ak3pRYtB06rR2kwQQzDKtkwmhgtcm8bBxXrp2ywmWgvVn2bR_yv8RkrG5k3dxgwTnk3BRk3PpPSlu8td_FwgNVbcbswyTyDlu6JGfgvmAgbAAybBT7bPpPSluU3i_Ft@WgbAAgDlIstluUHgTVHyx6JuuKSluPbcbswyTyDlQ@jqpzDK18bR_yv8RkDK1Pwg2YDNtpnKigbhm8bBxXriWstuRsOTR7HEnktTuU3Eb7tcxXr_x8bATgw70XSQuFtubVHgnkt_akvunVt9TywQiFtq0KWuu7CQQyblCVH_M6Sau7CQQstmTgwm0KSTIKDdm8bBxXrdTsOp2ywmWgvVnNv@ns3c_Pry_ywRxXr2hkHdLFvmbV3E0XHpbVwyu@xTuUtALVtgLsbPQUrTIKDgxswGpgv@Wgwmbyb8AgDAnVrGMY3ixXryT7Hq06fppFHaNPwg_ywiTYSahVHiCkfingtptgtdAgwunUv@ns3cn0v9T7HpNkfqNswTu8v@TyDT4YrG1zDK18vLD6fBbkHLOpnsDKwg27rgTgDTaVwonkHc_7tdbFOinktERktEhXrGM7HVWgwPQzC8tktBRVC_M6WabVwECkWuuKSqCFWyRgblCVH_Q@jqu7CQuFtubVHgnkt_akvunVt9TywQiFtq0@juu7CQDgwRTYWepXjqCFWThsv5Ak3pRYtB0KHmhktumyvmRktEZ6tqhsvgTyC_uXfqa6WTIKDg2k3dxgwGtVwPQUthRk3@hgCobkwmhgtcn2STuzwmhgtc_stmTgwmx6JqpPr8hk3lbktyRgblCVHPpPSlu8td_FwgNsHgTVHyx6JqpPruLk3pWVtgNswPpztpAPrabVwECgDlMKSqpPryRgblCVHPpzS9u6JLD6fg2k3dxgwLD6fBbkHLOonstonsD6fBbkHLOpnKigbhm8bBxXr@bsbcnNthRk3@hgCTuU3Eb7tcxXrThsv5Ak3pRYtB06rJL5zJ25jQQstmTgwm0@SqC7runVtgTVripKj7QOX7oKbcbswyTYWeuKSqCFWabVwECkWuMKSqCFWqhVwBbktl0@jqCFWqns3gTybpNkWd_s3pWgHERsW_x8b
ATgw70@SQiFtq0@SquK37ZKtc2VH_a@jhI8jqCFWBbs3qWgvV0XtpNgwTIXnstMDg2k3dxgwGtVwPpPtgZgwobkwmhgtcAPrR_yv8Rkvp_7wc_yDlu6JGfgvmAgbACgwgAVbEx6JqpPr8hk3lbktabVwECgDlu6JGMFvmnVt@bktlx6JAnsJGpybBT7bPpUSuuK37APryRgblCVHPp8Squ6JGM7HVWgwPQzvp_7wc_YWGaK37mU3pWgbBmUr7i5XqMOjQQzDK18bR_yv8RkDNtpnKaVry_ywRxXr2hkHdLFvmbV3E0XHpbVwyu@xTuUtALVtgLsbPQz3cxsthRVogZgwytzrLD@b8AVrlhVt@Rk3VbgtlxXtpmPruTyC@RgDTuFtubVHgnkt_akvunVt9TywQiFtq06SqCFWmbswyTYWuu7CQQstmTgwm0KSQOyfgNVwcCYWeuKSqQPru_FvPQPbET73_1UfaAFHAigbmRsvE_6HdTFvyNUvpxsfgxgvlRs3pMVtpLywo_gHETFtANP3AAkrGaVtExXr7_PrpIKDpakDNtMDpigbhNXnsDKwg27rgTgDTaVwo2stpTywm_PruTyC@RgDTpybBT7b_aKSqlUWyRgblCVH_tKSqCFWqns3gTybpNkWd_s3pWgHERsW_x8bATgw70@SQQstETFt80XShu7CQDgwRTYWqoKwgL73@hgC_IstARkrLOpnsDKwg27rgTgDTaVwo2stpTywmnNvpNVHcNVHTuU3Eb7tcxXrqns3gTybpNkWd_s3pWgHERsWabVwECkWeuKScoKbcbswyTYWeuKScoXrLD@bR_yv8RVrgTgDl5stpTywmn2bR_yv8RsJG5k3dxgwTnk3BRk3PpPSlu8td_FwgNVbcbswyTyDlu6JGfgvmAgbAAybBT7bPpPSluU3i_Ft@WgbAAgDlIstluUHgTVHyx6JeuKScpPryRgblCVHPp8Squ@JlIKDptkwmhgtcNKDpigbhNXnstMDdmPbmRkwPQzbd2yvuLk3gm7H_5FtgTVxqtzrG1ktiWgbiZgDTQyw8nkHc2ItpTywmCQkUWQxgQzDKtgtlmUwdWVtc_yCgxswPIstGuU3Eb7tcxXrqns3gTybpNkWd_s3pWgHERsWEnV3_MK37ZX3gAVbE06SqCFWTnk3BRk3_u6W_x8bATgw70@SquKSTuU3mLgDT4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8n8b8hswcLFfiWstuRsOTR7HEnktAuYtl_PrdWVHPQPCTuUfLD6fdNXnsD6fBbkHLOonsDKwg27rgTgDTiV3@hgCc_YrGM7HVWgwPQUHgTVHy0Xj9MK37ZKbcbswyTYWEaKSqCYrQOyfgNVwcCYWeoXDK1Pwg2YDNOpnKMFvmbV3EmPwdTyv8MkwdLyCALgDT5gv@LywTuPHVmywPQPHcC7HpOgvhhs3i_ybqTYrGMY3ixXryT7Hq06fpakbdCYflnstlWgwdmybuNUvpxsfd0gv7nPtg_s3pOg39Rk3Vn8SApXfm1zbeRywmbYf8bktAOs3TIKDpMFvmbV3ENXnsD63i_ybqT7rBhVHdxUvRhs3VNsvPQzwdWs3c_PrEb73cx6JERVCEnzbd2yvuLk3gm7HluU3mLgDl4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8nP3@hgCc_FfqWgvVRk38lXfhIzbuAzDK1U3i_ybqTYDNOpnKMFvmbV3EmPHVmywPQPHcC7HpOgvhhs3i_ybqTYrLOoHd_7rEnsbcNVrPuzrToXnBIUwcTYZ4nOoyQPbET73_1UfaAFHAigbmRsvE_6HdTFvyNUvpxsfuRk3hRk3Au7bqn@bBx@SEa@jeuKSEM@jTDPrRRYtiTybpNVx2LFtAbUCNtMHpZgwAm8DGOs3pNkfEnsbcNsWNt13cTFYE_ywdxVxEnsbcNgxQOp4goXnN5gHALVHgnktGMywEL_HmRgv8CPHpZgwAbPrQ0pnGuzbam7tdbywmCUJBm7tdbywmA8xAMywER73yoYnst1JgTsJ_uUJBm7tdbywmAPfNtpnlagHEns3Ehk3EAzWGpPHmRywlDznst1JabVwECsJ_uUJhl6SlDznst1JyRgblCVHlOKrli@SqpPfNtpnlMstATY3pWkvd_FJ_pzvpT7HpxsJ@OpnspP3mnkHgTgwmAzWGpz3ExV3lDznst1JmTytqNPH9NktcWgbAAsJ_5gv@Lyw@OMrst1JTRYwRRk36RktlT7blOKSAaKfNtpnlM7HmRgv8Rk3lOKrTQ7H8mYWXnPOppyvELVbeIPwg_ywiTYSahVHiCkfingt_a@WulKOpigbmRsvE_6HdTFvyWNfoTgwRbktuTFOXnUD7LyDoAywoTgtyT6ov02jk_KHELszxT6vFR5SRT5zqfQkwCyorhNHU0Yk71QzsRYorR2HUT2kVWObk_XwrR_SnT_bgnQzJ0FovLkCUTO071QkbWg02LsHUA5o_xIzsCy0vN5bk0Yk@NOCJTXorCkb1A2oqfObi_@orRISUT2z7NIkNbkojNzfTDznstMrEnsbcNkWGiFt5Rkt@OpnspzwgWgwlOKrl57b7bXjVLsb8myv7my39Izw@2FJ@Opnsp8tpTgwuAzWGo0nstpnQTyCqRkWGpzw@hs3yAPfGMY3i0Krl4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8nP3@hgCc_FfqWgvVRk3oRgtTRVwobsw9bVwcNU3a2sJPWznstpnQ0pnstMrGiyCqRkWGpPbExVt9pPfNtpnsuPrinktRbsw_uUCNtpnsuPrGpzwgWgwlOKrl4VHEmYWp18tp_gb@Rs3E_ywdxgbAAkfgWgbhRkfEnkWet6S918wBAgwp57b7bXjVLsb8myv7my391P3@hgCqWgbuTYf8L@H7pPfNtpnsuPrGpP3mnkHgTgwmAzWGpzHgTgwpAznstpnGu84NtpnsfYnstpONtMrGfyxQOp4NtMDpMFvmbV3ENXnsD6fBbkHLOMDuLk3gm7HLO1vBbOtERk3hhVtPIgH@WsWNQyw@ngvBm8DG5gv@LywQOow9NsvEbstAmPtgZgwDWgbiZgwBC8xGoYnsistiRytcNVHApgwERQtcxgwATYXVbQwyQPtgZgwonkHc_7tdbYrgIU3Eb7tcNPwgL73@hgCPQzv@nsv5_UWstpnstpnNtMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzr@bsbcn2bR_yv8RkrgIU3mLgDT4VHEmYWp1UHaAYfRhsvc_stpZkfingtpu7t9AgbALFf@bsbcNP3ymFDy_ywRxKbET73cM@XcQXzcQXzaAFHA5gviRkvpnsbAMst8RzSxbVtg2ywAiFtRMywATgDRhVtuRkJ@hgCpR7HPM7HdNVwd_7wRpybBT7bPMXSq5U3ynsHo2gviRs3P5gv@LywRasvEbstAxKtgZgwRMst@nk3uLVbcxgwPDgblCVHR5stATYJyRgblCVHP4@jToXnP0ow9NsvEbstAmz
3cxsthRVogZgwytPrQ0pnBnsv9xgwATYflRVHJWgw8RktE_5CsTVxTDgb5RsOp2ywmWgvV_8xAM7HVWgwAigbum7tdbyDTIstARkrQOp4N5gHALVHgnktGMVtgLsb6bs3ERktc_7xguUCNt1HgNVwpAYfpmywACzryT7Hq06fppFHaN8tV2gviTYfE2FfcWgfiWgvubsvpxzvd_FvcWstAhgfhLyfmRgv@x8tdNVwmbVwpQ8xQOp4N5gHALVHgnktG1V3cNsXyhktARVtytPrQ0pnabktBnsHA1V3cNVxT4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8nzHgRsHpaKSppyvELVb8DgbhRgfJL_YnmPzcmFtmTywuxUvyhktARVt85stmxzwmRgwTtUWNfYnRRYtiTybpNVrmRgtp2yw12ywmWgvVCQkUWQxgO1CNtMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrdTsOp2ywmWgvV_8xAM7HVWgwAigbum7tdbyDTIstARkrQOpnBnsv9xgwATYflRVHJWgw8RktE_5CsTVxT1kHc_7tdbFOg2k3dxgwTtzfu_FvPQzrQOpnBnsv9xgwATYflRVHJWgw8RktE_5CsTVxTaVwonkHc_7tdbFOiWstuRkrgIU3Eb7tcNPwgL73@hgCPQztpNgwToXnsMVtchk3sNVHc_YHdWVxiTgZATywm2yv@bUWNt13cT7kgxgwpR7HypU3cTFohRk3@hgCFT2o6C8xlDPr7iKSquKSgoXnsQyw@ngvBm8DGiY39RsWNfYnRRYtiTybpNVrECgbuxIthbgwyfsthbgwnhgtcbPrQ0pnGtkwG4ztd2yblhVHp_Yfdm73nhgtcN8bATgw7nOwyQ8ogLk3pLFtRTYrgu8rPu8fetPrQ0pnsuz3cTyHmNVrabktBnsHAistiRytcNVHOxsthbgwnhgtcxNWNtMrPm8w@LywGoYnstMrmRVH9_YtGistiRytcNVHApgwERQtcxgwATYXVbQwyfsthbgwnhgtcbUWNtMrP0MrP0ow9NsvEbstAmU3cTFohRk3@hgCFT2o6C8xNoYnstpnBnsv9xgwATYflRVHJWgw8RktE_5CsTVxT1kHc_7tdbFOg2k3dxgwTtzfu_FvGfKrT4VHEmYWp1UHaAYfBbk3cLVHmpyvELVbAMst8n8vBLFfuuKS7_@jqIPbExVtToXnstMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrp2ywmWgvVn2bR_yv8RsOeQ8xAMY3im8DGQPbET73_1UfaAFHAigbmRsvE_6HdTFvyNUvpxsfdTs3pMKSq4YS9uXfyTyt@_UWNtpnBnsv9xgwATYflRVHJWgw8RktE_5CsTVxTMst9NVHBnsHANgH8_8xAtktARk3FT2o6xXrhuXrQOpnsMVwsNVHc_YHdWgDuRVHsNVHc_YHdWVxTMst9NVHrnsHAC8xTD8SquKSgoXnstMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrdTsOp2ywmWgvV_8xAM7HVWgwAigbum7tdbyDTQVtpLsbToXnstpnstkwG48rmRVtphVwguPHybs3UnkHgRVxTiV3@hgCc_YrgIPtcTF3qWgvVC8SgoXnstp4NOonRRYtiTybpNVruRVHxnstERk3FT2o6C8xNoYnstMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrRnstERk3obkwmhgtc_8xAMY3im8DGQPbET73_1UfaAFHAigbmRsvE_6HdTFvyNUvpxsfdTs3ppXS74yWqIPbExVtToXnsistiRytcNVHApgwERQtcxgwATYXVbQwyQ8vBn0wpnVHc_YrgIU3Eb7tcNPwgL73@hgCPQzv@nsv5_UWNtp4N5gHALVHgnktGQyw8nkHc2ItpTywmCQkUWQxgO1CNtMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrdTsORnstERk3TtzfuTyC@RkfBbs3qWgvVxXrAnktc_UWNtMwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzrRnstERk3obkwmhgtc_8xAMY3im8DGQzrQOp4N5gHALVHgnktGMst9NVHrnsHAC8xNoYnsMgHm_ywATFXrxK3d_F3cbOtECPwpLgH8RktENUwcTyz@RgtcNVHWbyZBCzringHAT7wpAYtARytTtzfgNktc_7ZvxQogoXnstkwG4Uv9_Y3cNVHDT5DPa@xGistiRytcNVHApgwERQtcxgwATYXVbQwyQ8vBnNthRk3@hgCoLVtpLywTtzfuTyC@RkfBbs3qWgvVxXrTWstiZkrQOpng2VryMgHm_ywATFXrx@Detznsto3cxsthRsohRk3@hgCFT2o6C8xQOpncWs3c0pnsistiRytcNVHApgwERQtcxgwATYXVbQwyQUvpRYtETstaNkt9xkrgI8bANgwmCQkUW5DyMgHm_ywATFXrx8SgoXnP0onRRYtiTybpNVr1mywAA2bATstaC8HmWVfGIgv8RVfGpybBT7b@uPbcbswyT7fGts3CRs3g0ywj_VtcbznQ0pnhhk3GDgwRT7rPu8odT7bAQFt9NVwy4U3i_ywcNkfabVwECVr8uUHgTVHybPrpuzSgoXns5yvmmPHpm7rPu8odT7bAQFt9NVwy4U3i_ywcNkfyRgblCVHGfPryRgblCVHguUfGQ@xQOpnhhk3GM7HVWgw4TY3GfKrliFtpWkvd_yDAnVf@nsvdTybpNgDAnVfBbk3cLVHp_ybcLyDAnVfuTyvERF3PIst@fgwARYvd_yDAnVfuLk3pWVtThk3uxXtpAUWNtpnuTyC@RsYE_7r5fKrlDz3cLyb_hkv@RgDluUxG48bu_2wubkCchOv@RVrMuUJVRs3luzWGpztpA8xQOpnsM7HVWgw4TY3Go8DGpPfabVwECgDluUxGpybBT7bGoPrlDPbcbswyTyDluUxG4gwgAVbEZXnst13Eb7tcL_HmmUxPuUJ@DgwRTyDluUxGDgwRT7r5uUJ@iFtqx6JGoPrEnV3QOpnsM7HVWgw4TY3Go8DGpPfuLk3cRktYx6JGoPr@RkwEmUxGpPfuLk3cRktkx6JGoPrEnV3QOpnabktBnsHA1V3cNVx9_7t@uztdxgw@uU3Eb7tcL_HmbUWNfYnK1U3i_ybqTYDND6fTnVwVNXnNOonND63i_ybqT7rEb73cx6JERVCEnzbd2yvuLk3gm7HluU3mLgDl4VHEmYWp1P3mnV3cWVtc_73pmF3AMst8n8vqRYfqCV3MOFtARgbBxXSal@SRDgb8x@SmpzDK1U3i_ybqTYDNOMDdf8fGuNtqhQwuNztcT7r3nV39NVwc_7rDnVwcmzwp_7raAFHAigbmRsvE_6HdTFvyNUvpxVr8fzDND63i_ybqT7rEb73cxXrERVCEnzbd2yvuLk3gm7HTIXnGuzHd_7romFtqm8DG1_3pm7rKW7rOxNWNuPromFtqNP39L7byoNJubVHcbQwlDPruQXjhM6jzbUWNuPromFtqNP39L
7byoNJ8bktWbVwlDPrqf2xQOMrG1_3pmYfqRF3yCU0luFtqRYtBRk3um2wmbQYlDPrqf2xQOMrG1_3pmYfqRF3yCU0ligw@hgCWRVHaRgwAAPfGu@OgoXnGuUOqnV3AuyHuCVxOAPwc2gv9WVHlDPrRhVtuRgOgoXnGuUOqnV3AuyHuCVxOAPwc2gv9WVH3Rk3rhgClDPrqf2xQOMrG1_3pmYfqRF3yCU0liFtqxstuT7odbywmAPfG5gv@LywzbUWNuPry5gHALVHgnktytPrQ0MrGuPrhhk3GuyvGfKrBnsv9xgwATYfi_ywdTywJWgw8RktECUJuLk3gm7HltUWGuyvAiyCqRVrPuUJERVCEnzbd2yvuLk3gm7HloKrqhkfdLyCALVrPuPHmRywQOMrGuPrhhk3GM7rPuPwpLgH8RktENUwcTyz@RgtcNVHu_5CvhswnhgtcCUJuLk3gm7HltU0qfNWGOMrGuPrqhkfu_FvGfKrl1UfihXfqnV3dTs3AIgwEnP3pmYf2LFJQOMrGuPrqhkfpNgwm_Ftmm8DG5gHALVHgnktytPrQ0MrGuPrGuzHd_7ruhVrPuPwpLgH8RktENUvmRgvERgz@RgtcNVHypU3i_ybqTFJgoKruhkfEb73cm8DGpPHcC7HpOgvhhs3i_ybqTFJQuU3dN8vubYtim8DGiY39RsWNuPrGuPrGMyvAMY3im8DGpUfpMkSAuFtqhVwuNztcTFfqnV3AOs3loXnGuPrGuPruNP3d_ywATYopTgwAtktuRk3E_5wRnk3cCU3dWPrubUWNuPrGu84QOMrGuPruNP3d_ywATYopTgwAtktuRk3E_5wRnk3cCP3dWPrubUWNuPrPbPxgoXnK1U3i_ybqTYDND@r8fPr3nV3jTs3AIgwEmPYpmyHATgwmmUXpTgwGlOtBm8f8IXnND63i_ybqT7rEb73cx6JERVCEnzbd2yvuLk3gm7HlIXnhhk3GpstpAVtcTyvlm8DGpstpAVtcTyvlmP4KmUCPZXnlnstlWgwEhswAMgtBm8DGpstpAVtcTyvlNUv8TVrKW7rOxNWN4zw9NsvEbstAC8xGoYnhhk3GpgvBL7rPuPwpLgH8RktENUvmRgvERgz@RgtcNVHypU3i_ybqTFJgoXnlhVwuN8vubYtim8DGiY39RsWNpgvBLYfEb73cm8DGpPHcC7HpOgvhhs3i_ybqTFJQOoHd_7r9Lyw4L_oGfKrl4VHEmF3_pPrPfKrBnsv9xgwATYf@nsvdTybpNkfq_FtEnsvpWsWNpgvBLYfu_FvGfKrylF3cLNY6mUDGpPbET73u06JGOKrl4VHEmYWltPr5uznl1UfaAFHApstpAVtcTyvlLywm2ybiRs3AMst8nPHdAsf2LFflm7HAOs3loXnhhk3GIstBRVrPuPwpLgH8RktENUwcTyz@RgtcNVHu_5CvhswnhgtcCUJuLk3gm7HltU0qfNWNIstBRkfqhk3cNVHnnVwcN8bALywmTYXc2stmRVxlhVwuWPrAnVwcbUWNfyxytUWND6fuLk3gm7HLOonKMFvmbV3EmPHVmywPpPHcC7HpOgvhhs3i_ybqTFJLO1wpnsw@RVHdAkfixVwAuyHuCVxRRYtiTybpNVxguUCNpstpAVtcTyvlNPwc2gbARsY@nVHypUfup6jq5Kjh18S7h6J@uU0eDPref_fGpPwg2yflm7H8aVw8aKjqa@Smp@jqlKW74@fqp8xAaVwBL2wm2ybiRVxlnstlWgwEhswAuyHThVwuC8xgoXnlnstlWgwEhswAuyHThVwuC8xAlktd_VtcL2bAAVtc_2weRywuT7xgoXnlnstlWgwEhswAlktd_VtcL2wm2ybiRs3ytUWNfyxQOMDpMFvmbV3ENXnNOMDdf8fGaKCeu8f8IXnKigbhm8bBx6JBbkH8pV3Ex8vBx8SEu@SeQ6j9u@j74KW8u6JGM7HVWgwPpUHgTVHy0@SqCFWG4gwgAVbE0@SqCFWlIXnKMFvmbV3EmPHVmywPpPHcC7HpOgvhhs3i_ybqTFJLO1wpnsw@RVHdAkfixVwAuyHuCVxRRYtiTybpNVxguUCGpstpAVtcTyvlNPwgL73@hgCypPwg2yflm7H8aVw8aKjqa@Smp@jqlKW74@fqp8xQu84goXnK1U3i_ybqTYDND6fBbkHLOonNOMDuLk3gm7HLOMrG4zw9NsvEbstAC8b@M7fpWUw@Q7fdW8tgoybOAUzpnsw@RgXAhVtVTybiLFoT0gwiTFJzxX3Qts0mx2DgZ03zW74RRYtiTybpNVxgoYnGuPxgZ03zN83Pts0mx0feW74Ox2xAuyHuCVxd_Fw9xgwATF3gf7fgZ03zNPtPaXxARsHGi5vERVxgo@vPMYfi_ywdTywJWgw8RktECUtgDznGu8tPMYflRVHJWgw8RktELYXVT2vlN5v8RVxpbU0qfNWdN8vubYtix@SQakfu_FvPpsW8NP3d_ywATYopTgwAtktuRk3E_5wRnk3cC8v@fgxNuPrPbPxabktBnsH@istiRytcNVH@pU3i_ybqTFJ@pUfppFHaNUwpnsw@RgfdNgv@b7HgLs3AMst8n8vAhVtVTybiLYf2LFJ@pUwdA8xQOonGuUwdCUJi_ywdTywlDPrll2X8iXjaiKjqQ6j8a6J@uUJBbk3cLVHmpyvELVbAMst8A8xQOMrGpgvypU3cNVwlDPrluyvlRkHgRsHltUWNOMDpMFvmbV3ENXnND6fyTyt@NKDuLk3gm7HGDgvAAgHdAgwPOgvhhs3i_ybqTYDN5gHALVHgnktGIst4TyvERF3ytPrQ01HgNVwpAYfuTyvERF3PQzrQO13cT7kgxgwpR7HyQztpL_HdTyHuC8xTD8Squ@xQOp4NIst4TyvERF3ytUWND6fuLk3gm7HLl"
# final_rtmp=' token=$doregex[tok] pageUrl=http://www.direct2watch.com/ live=1 timeout=10</link>
un_chtml=get_html(meta,data);
str_pattern='streamer.*[\'"](.*?)[\'"]'
elif 'streamer\'' in html:
un_chtml=html
str_pattern='streamer\': \'(.*?)\''
else:
un_chtml=html
str_pattern='streamer.*[\'"](.*?)[\'"]'
else:
un_chtml=page_data
str_pattern='streamer.*[\'"](.*?)[\'"]'
if justHtml:
return un_chtml+'ThisPage['+url+']'
print str_pattern,un_chtml
streamer=re.compile(str_pattern).findall(un_chtml)[0]
streamer=streamer.replace('\\/','/')
str_pattern='file[\'"]?: [\'"](.*?)[\'"]'
file=re.compile(str_pattern).findall(un_chtml)[0].replace('.flv','')
if file=="":
return ""
#print file, un_chtml
str_pattern='getJSON\(\"(.*?)\"'
token_url=re.compile(str_pattern).findall(un_chtml)[0]
if token_url.startswith('//'): token_url='http:'+token_url
headers=[('Referer',url)]
token_html=getUrl(token_url,headers=headers)
str_pattern='token":"(.*)"'
token=re.compile(str_pattern).findall(token_html)[0]
str_pattern='\'flash\', src: \'(.*?)\''
swf=re.compile(str_pattern).findall(un_chtml)
if not swf or len(swf)==0:
str_pattern='flashplayer: [\'"](.*?)[\'"]'
swf=re.compile(str_pattern).findall(un_chtml)
swf=swf[0]
#print streamer
app=''
if '1935/' in streamer:
app=streamer.split('1935/')[1]
app+=' app='+app
streamer=streamer.split('1935/')[0]+'1935/'
final_rtmp='%s%s playpath=%s swfUrl=%s token=%s live=1 timeout=10 pageUrl=%s'%(streamer,app,file,swf,token,url)
return final_rtmp
#print decrypt_vipracing('http://www.direct2watch.com/embedplayer.php?width=653&height=410&channel=10&autoplay=true','http://vipracing.tv/channel/espn')
|
gpl-3.0
|
Dacelonid/gerrymander
|
gerrymander/reports.py
|
1
|
49794
|
#
# Copyright (C) 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import prettytable
import logging
import time
import re
import json
import sys
import xml.dom.minidom
from gerrymander.operations import OperationQuery
from gerrymander.model import ModelApproval
from gerrymander.format import format_date
from gerrymander.format import format_delta
from gerrymander.format import format_title
from gerrymander.format import format_color
LOG = logging.getLogger(__name__)
class ReportOutputColumn(object):
ALIGN_LEFT = "l"
ALIGN_RIGHT = "r"
ALIGN_CENTER = "c"
def __init__(self, key, label, mapfunc, sortfunc=None, format=None, truncate=0, align=ALIGN_LEFT, visible=True):
self.key = key
self.label = label
self.mapfunc = mapfunc
self.sortfunc = sortfunc
self.format = format
self.truncate = truncate
self.align = align
self.visible = visible
def get_value(self, report, row):
val = self.mapfunc(report, self.key, row)
if self.format is not None:
val = self.format % val
elif val is None:
val = ""
if type(val) != str:
val = val.encode('utf-8')
if self.truncate and len(val) > self.truncate:
val = val[0:self.truncate] + "..."
return val
def get_sort_value(self, report, row):
if self.sortfunc:
return self.sortfunc(report, self.key, row)
else:
return self.mapfunc(report, self.key, row)
class ReportOutput(object):
DISPLAY_MODE_TEXT = "text"
DISPLAY_MODE_CSV = "csv"
DISPLAY_MODE_XML = "xml"
DISPLAY_MODE_JSON = "json"
def __init__(self, usecolor=False):
super(ReportOutput, self).__init__()
self.usecolor = usecolor
def display(self, mode, stream=sys.stdout):
if mode == ReportOutput.DISPLAY_MODE_TEXT:
stream.write(self.to_text())
elif mode == ReportOutput.DISPLAY_MODE_CSV:
stream.write(self.to_csv())
elif mode == ReportOutput.DISPLAY_MODE_XML:
impl = xml.dom.minidom.getDOMImplementation()
doc = impl.createDocument(None, "report", None)
self.to_xml(doc, doc.documentElement)
stream.write(doc.toprettyxml())
elif mode == ReportOutput.DISPLAY_MODE_JSON:
doc = []
self.to_json(doc)
stream.write(json.dumps(doc, indent=2) + "\n")
else:
raise Exception("Unknown display mode '%s'" % mode)
def to_text(self):
raise NotImplementedError("Subclass should implement the 'to_text' method")
def to_csv(self):
raise NotImplementedError("Subclass should implement the 'to_csv' method")
def to_xml(self, root):
raise NotImplementedError("Subclass should implement the 'to_xml' method")
def to_json(self, root):
raise NotImplementedError("Subclass should implement the 'to_json' method")
class ReportOutputCompound(ReportOutput):
def __init__(self):
self.report = []
def add_report(self, report):
self.report.append(report)
def to_text(self):
blocks = []
for report in self.report:
blocks.append(report.to_text())
return "\n".join(blocks)
def to_json(self, root):
for report in self.report:
report.to_json(root)
def to_xml(self, doc, root):
for report in self.report:
report.to_xml(doc, root)
class ReportOutputList(ReportOutput):
def __init__(self, columns, title=None, usecolor=False):
super(ReportOutputList, self).__init__(usecolor)
self.columns = columns
self.row = {}
self.title = title
def set_row(self, row):
self.row = row
def to_xml(self, doc, root):
lst = doc.createElement("list")
root.appendChild(lst)
if self.title is not None:
title = doc.createElement("title")
title.appendChild(doc.createTextNode(self.title))
lst.appendChild(title)
headers = doc.createElement("headers")
content = doc.createElement("content")
lst.appendChild(headers)
lst.appendChild(content)
for col in self.columns:
if col.visible:
xmlcol = doc.createElement(col.key)
xmlcol.appendChild(doc.createTextNode(col.label))
headers.appendChild(xmlcol)
for col in self.columns:
if col.visible:
xmlfield = doc.createElement(col.key)
xmlfield.appendChild(doc.createTextNode(col.get_value(self, self.row)))
content.appendChild(xmlfield)
def to_json(self, root):
headers = {}
for col in self.columns:
if col.visible:
headers[col.key] = col.label
content = {}
for col in self.columns:
if col.visible:
content[col.key] = col.get_value(self, self.row)
node = {
"list": {
"headers": headers,
"content": content
}
}
if self.title is not None:
node["list"]["title"] = self.title
root.append(node)
def to_text(self):
labels = []
width = 1
for col in self.columns:
if col.visible:
if len(col.label) > width:
width = len(col.label)
labels.append(col.label)
fmt = " %" + str(width) + "s: %s"
lines = []
for col in self.columns:
if col.visible:
line = fmt % (col.label, col.get_value(self, self.row))
lines.append(line)
prolog = ""
if self.title is not None:
prolog = format_title(self.title) + "\n"
return prolog + "\n".join(lines) + "\n"
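# Illustrative sketch (not part of the original module): a ReportOutputColumn
# maps (report, key, row) to a display value, and ReportOutputList renders one
# row of such columns; the keys and row data below are made up for the example.
#
#     cols = [
#         ReportOutputColumn("name", "Name", lambda rep, key, row: row[0]),
#         ReportOutputColumn("count", "Count", lambda rep, key, row: row[1],
#                            format="%d"),
#     ]
#     summary = ReportOutputList(cols, title="Example summary")
#     summary.set_row(["demo", 3])
#     summary.display(ReportOutput.DISPLAY_MODE_TEXT)   # same as printing to_text()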
class ReportOutputTable(ReportOutput):
def __init__(self, columns, sortcol, reverse, limit, title=None, usecolor=False):
super(ReportOutputTable, self).__init__(usecolor)
self.columns = list(columns)
self.rows = []
self.sortcol = sortcol
self.reverse = reverse
self.limit = limit
self.title = title
def add_column(self, col):
self.columns.append(col)
def add_row(self, row):
self.rows.append(row)
def sort_rows(self):
sortcol = None
for col in self.columns:
if col.key == self.sortcol:
sortcol = col
if sortcol is not None:
self.rows.sort(key = lambda item: sortcol.get_sort_value(self, item),
reverse=self.reverse)
def to_xml(self, doc, root):
self.sort_rows()
table = doc.createElement("table")
root.appendChild(table)
if self.title is not None:
title = doc.createElement("title")
title.appendChild(doc.createTextNode(self.title))
table.appendChild(title)
headers = doc.createElement("headers")
content = doc.createElement("content")
table.appendChild(headers)
table.appendChild(content)
for col in self.columns:
if col.visible:
xmlcol = doc.createElement(col.key)
xmlcol.appendChild(doc.createTextNode(col.label))
headers.appendChild(xmlcol)
rows = self.rows
if self.limit is not None:
rows = rows[0:self.limit]
for row in rows:
xmlrow = doc.createElement("row")
for col in self.columns:
if col.visible:
xmlfield = doc.createElement(col.key)
xmlfield.appendChild(doc.createTextNode(col.get_value(self, row)))
xmlrow.appendChild(xmlfield)
content.appendChild(xmlrow)
return doc
def to_json(self, root):
self.sort_rows()
headers = {}
for col in self.columns:
if col.visible:
headers[col.key] = col.label
content = []
rows = self.rows
if self.limit is not None:
rows = rows[0:self.limit]
for row in rows:
data = {}
for col in self.columns:
if col.visible:
data[col.key] = col.get_value(self, row)
content.append(data)
node = {
"table": {
"headers": headers,
"content": content
}
}
if self.title is not None:
node["table"]["title"] = self.title
root.append(node)
def to_text(self):
self.sort_rows()
labels = []
for col in self.columns:
if col.visible:
labels.append(col.label)
table = prettytable.PrettyTable(labels)
for col in self.columns:
table.align[col.label] = col.align
table.padding_width = 1
rows = self.rows
if self.limit is not None:
rows = rows[0:self.limit]
for row in rows:
data = []
for col in self.columns:
if col.visible:
data.append(col.get_value(self, row))
table.add_row(data)
prolog = ""
if self.title is not None:
prolog = format_title(self.title) + "\n"
return prolog + str(table) + "\n"
def to_csv(self):
self.sort_rows()
labels = []
for col in self.columns:
if col.visible:
labels.append(col.label)
lines = []
if self.title is not None:
lines.append(self.title)
lines.append(",".join(labels))
rows = self.rows
if self.limit is not None:
rows = rows[0:self.limit]
for row in rows:
data = []
for col in self.columns:
if col.visible:
data.append(col.get_value(self, row))
lines.append(",".join(data))
return "\n".join(lines)
class Report(object):
def __init__(self, client):
self.client = client
def generate(self):
raise NotImplementedError("Subclass must override generate method")
def display(self, mode):
output = self.generate()
output.display(mode)
class ReportTable(Report):
def __init__(self, client, columns, sort=None, reverse=False):
super(ReportTable, self).__init__(client)
self.columns = columns
self.limit = None
self.set_sort_column(sort, reverse)
def get_columns(self):
return self.columns
def get_column(self, key):
for col in self.columns:
if col.key == key:
return col
return None
def has_column(self, key):
col = self.get_column(key)
if col is None:
return False
return True
def set_sort_column(self, key, reverse=False):
got = False
for col in self.columns:
if col.key == key:
got = True
if not got:
raise Exception("Unknown sort column %s" % key)
self.sort = key
self.reverse = reverse
def set_data_limit(self, limit):
self.limit = limit
def new_table(self, title=None):
return ReportOutputTable(self.columns, self.sort,
self.reverse, self.limit,
title, self.usecolor)
class ReportPatchReviewStats(ReportTable):
def user_mapfunc(rep, col, row):
return row[0]
def team_mapfunc(rep, col, row):
return row[2]
def review_mapfunc(rep, col, row):
return row[1]['total']
def ratio_mapfunc(rep, col, row):
plus = float(row[1]['votes']['flag-p2'] + row[1]['votes']['flag-p1'])
minus = float(row[1]['votes']['flag-m2'] + row[1]['votes']['flag-m1'])
ratio = (plus / (plus + minus)) * 100
return ratio
def vote_mapfunc(rep, col, row):
return row[1]['votes'][col]
COLUMNS = [
ReportOutputColumn("user", "User", user_mapfunc, align=ReportOutputColumn.ALIGN_LEFT),
ReportOutputColumn("team", "Team", team_mapfunc, align=ReportOutputColumn.ALIGN_LEFT),
ReportOutputColumn("reviews", "Reviews", review_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT),
ReportOutputColumn("flag-m2", "-2", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT),
ReportOutputColumn("flag-m1", "-1", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT),
ReportOutputColumn("flag-p1", "+1", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT),
ReportOutputColumn("flag-p2", "+2", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT),
ReportOutputColumn("ratio", "+/-", ratio_mapfunc, format="%0.0lf%%", align=ReportOutputColumn.ALIGN_RIGHT),
]
def __init__(self, client, projects, maxagedays=30, teams={}, usecolor=False):
super(ReportPatchReviewStats, self).__init__(client,
ReportPatchReviewStats.COLUMNS,
sort="reviews", reverse=True)
self.projects = projects
self.teams = teams
self.maxagedays = maxagedays
self.usecolor = usecolor
def generate(self):
        # We could query all projects at once, but doing them individually
        # gives a better hit rate against the cache when the report is re-run
        # for many different project combinations
reviews = []
cutoff = time.time() - (self.maxagedays * 24 * 60 * 60)
for project in self.projects:
query = OperationQuery(self.client,
{
"project": [project],
},
patches=OperationQuery.PATCHES_ALL,
approvals=True)
def querycb(change):
for patch in change.patches:
for approval in patch.approvals:
if approval.is_newer_than(cutoff):
reviews.append(approval)
query.run(querycb)
reviewers = {}
for review in reviews:
if review.action != ModelApproval.ACTION_REVIEWED or review.user is None:
continue
reviewer = review.user.username
if reviewer is None:
reviewer = review.user.name
if reviewer is None:
continue
if reviewer.lower() in ["jenkins", "smokestack"]:
continue
reviewers.setdefault(reviewer,
{
'votes': {'flag-m2': 0, 'flag-m1': 0, 'flag-p1': 0, 'flag-p2': 0},
'total': 0,
})
reviewers[reviewer]['total'] = reviewers[reviewer]['total'] + 1
votes = { "-2" : "flag-m2",
"-1" : "flag-m1",
"1" : "flag-p1",
"2" : "flag-p2" }
cur = reviewers[reviewer]['votes'][votes[str(review.value)]]
reviewers[reviewer]['votes'][votes[str(review.value)]] = cur + 1
compound = ReportOutputCompound()
table = self.new_table("Review statistics")
compound.add_report(table)
for user, votes in reviewers.items():
userteam = ""
for team in self.teams.keys():
if user in self.teams[team]:
userteam = team
table.add_row([user, votes, userteam])
summary = ReportOutputList([
ReportOutputColumn("nreviews", "Total reviews", format="%d",
mapfunc=lambda rep, col, row: row[0]),
ReportOutputColumn("nreviewers", "Total rviewers", format="%d",
mapfunc=lambda rep, col, row: row[1])
], title="Review summary")
summary.set_row([len(reviews), len(reviewers.keys())])
compound.add_report(summary)
return compound
class ReportPatchReviewRate(ReportTable):
def user_mapfunc(rep, col, row):
return row[0]
def team_mapfunc(rep, col, row):
return row[1]
def week_mapfunc(rep, col, row):
if col not in row[2]:
return 0.0
return (row[2][col] / 7.0)
def total_mapfunc(rep, col, row):
if col not in row[2]:
return 0.0
return (row[2][col] / (52.0 * 7.0))
COLUMNS = [
ReportOutputColumn("user", "User", user_mapfunc, align=ReportOutputColumn.ALIGN_LEFT),
ReportOutputColumn("team", "Team", team_mapfunc, align=ReportOutputColumn.ALIGN_LEFT),
ReportOutputColumn("total", "Total", total_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week1", "1 week", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week2", "2 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week3", "3 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week4", "4 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week5", "5 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week6", "6 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week7", "7 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week8", "8 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week9", "9 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week10", "10 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week11", "11 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week12", "12 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week13", "13 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week14", "14 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week15", "15 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week16", "16 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week17", "17 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week18", "18 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week19", "19 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week20", "20 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week21", "21 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week22", "22 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week23", "23 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week24", "24 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week25", "25 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week26", "26 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week27", "27 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week28", "28 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week29", "29 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week30", "30 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week31", "31 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week32", "32 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week33", "33 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week34", "34 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week35", "35 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week36", "36 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week37", "37 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week38", "38 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week39", "39 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week40", "40 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week41", "41 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week42", "42 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week43", "43 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week44", "44 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week45", "45 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week46", "46 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week47", "47 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week48", "48 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week49", "49 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week50", "50 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week51", "51 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
ReportOutputColumn("week52", "52 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"),
]
def __init__(self, client, projects, teams={}, usecolor=False):
super(ReportPatchReviewRate, self).__init__(client,
ReportPatchReviewRate.COLUMNS,
sort="total", reverse=True)
self.projects = projects
self.teams = teams
self.usecolor = usecolor
def generate(self):
# We could query all projects at once, but if we do them
# individually it means we get better hit rate against the
# cache if the report is re-run for many different project
# combinations
reviewers = {}
now = time.time()
for project in self.projects:
query = OperationQuery(self.client,
{
"project": [project],
},
patches=OperationQuery.PATCHES_ALL,
approvals=True)
def querycb(change):
for patch in change.patches:
for approval in patch.approvals:
if approval.action == ModelApproval.ACTION_VERIFIED:
continue
user = approval.user
if user is None or user.username is None:
continue
username = user.username
if username not in reviewers:
reviewers[username] = { "total": 0}
agesecs = approval.get_age(now)
ageweeks = int(agesecs / (60 * 60 * 24 * 7)) + 1
key = "week%d" % ageweeks
if key not in reviewers[username]:
reviewers[username][key] = 0
reviewers[username][key] = reviewers[username][key] + 1
if ageweeks <= 52:
reviewers[username]["total"] = reviewers[username]["total"] + 1
query.run(querycb)
table = self.new_table("Daily review rates per week")
for reviewer in reviewers.keys():
userteam = ""
for team in self.teams.keys():
if reviewer in self.teams[team]:
userteam = team
table.add_row([reviewer, userteam, reviewers[reviewer]])
return table
class ReportBaseChange(ReportTable):
@staticmethod
def get_approval_votes(patch):
# Yes, the levels are in a slightly odd order:
# a +2 or -2 is more important than any -1 or +1,
# so we prefer them as the summary value
levels = ["-2", "2", "-1", "1"]
votes = {
"c": { "total": collections.defaultdict(int),
"list": [],
"summary": "",
"details": "",
},
"v": { "total": collections.defaultdict(int),
"list": [],
"summary": "",
"details": "",
},
"w": { "total": collections.defaultdict(int),
"list": [],
"summary": "",
"details": "",
},
}
for approval in patch.approvals:
got_type = approval.action[0:1].lower()
if got_type not in votes:
continue
vote = str(approval.value)
votes[got_type]["total"][vote] = votes[got_type]["total"][vote] + 1
votes[got_type]["list"].append(vote)
for key in votes.keys():
votes[key]["details"] = ",".join(votes[key]["list"])
vals = []
for level in levels:
if level in votes[key]["total"]:
votes[key]["summary"] = level
break
return votes
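# Illustrative shape of the returned structure (assumed example values):
#   {'c': {'total': {'2': 1, '-1': 1}, 'list': ['2', '-1'],
#          'summary': '2', 'details': '2,-1'},
#    'v': {...}, 'w': {...}}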
def approvals_mapfunc(rep, col, row):
patch = row.get_current_patch()
if patch is None:
LOG.error("No patch")
return ""
votes = ReportBaseChange.get_approval_votes(patch)
keys = list(votes.keys())
keys.sort(reverse=True)
data = " ".join(map(lambda val: "%s=%s" % (val, votes[val]["details"]), keys))
if rep.usecolor:
if votes["w"]["total"]["1"] > 0: # Stuff pending merge
return format_color(data, fg="blue", styles=["bold"])
elif votes["w"]["total"]["-1"] > 0: # Work-in-progress
return format_color(data, fg="magenta", styles=[])
elif votes["c"]["total"]["-2"] > 0: # Hard-nack from core
return format_color(data, fg="red", styles=["bold"])
elif votes["c"]["total"]["-1"] > 0 or votes["v"]["total"]["-1"] > 0: # Nack from any or bots
return format_color(data, fg="red", styles=[])
elif votes["c"]["total"]["2"] > 0: # Approval from core
return format_color(data, fg="green", styles=["bold"])
elif votes["c"]["total"]["1"] > 0: # Approval from any
return format_color(data, fg="green", styles=[])
else:
return data
else:
return data
def votes_mapfunc(rep, col, row):
patch = row.get_current_patch()
if patch is None:
LOG.error("No patch")
return ""
if col == "tests":
coltype = "v"
elif col == "reviews":
coltype = "c"
else:
coltype = "w"
votes = ReportBaseChange.get_approval_votes(patch)
data = "%2s" % votes[coltype]["summary"]
if rep.usecolor:
if votes[coltype]["total"]["-2"] > 0: # Hard-nack from core
return format_color(data, fg="red", styles=["bold"])
elif votes[coltype]["total"]["2"] > 0: # Approval from core
return format_color(data, fg="green", styles=["bold"])
elif votes[coltype]["total"]["-1"] > 0: # Soft-nack from any
return format_color(data, fg="red", styles=[])
elif votes[coltype]["total"]["1"] > 0: # Approval from any
return format_color(data, fg="green", styles=[])
else:
return data
else:
return data
def user_mapfunc(rep, col, row):
if not row.owner or not row.owner.username:
return "<unknown>"
return row.owner.username
def date_mapfunc(rep, col, row):
if col == "lastUpdated":
return format_date(row.lastUpdated)
else:
return format_date(row.createdOn)
def date_sortfunc(rep, col, row):
if col == "lastUpdated":
return row.lastUpdated
else:
return row.createdOn
COLUMNS = [
ReportOutputColumn("status", "Status", lambda rep, col, row: row.status),
ReportOutputColumn("topic", "Topic", lambda rep, col, row: row.topic, visible=False),
ReportOutputColumn("url", "URL", lambda rep, col, row: row.url),
ReportOutputColumn("owner", "Owner", user_mapfunc),
ReportOutputColumn("project", "Project", lambda rep, col, row: row.project, visible=False),
ReportOutputColumn("branch", "Branch", lambda rep, col, row: row.branch, visible=False),
ReportOutputColumn("subject", "Subject", lambda rep, col, row: row.subject, truncate=30),
ReportOutputColumn("createdOn", "Created", date_mapfunc, date_sortfunc),
ReportOutputColumn("lastUpdated", "Updated", date_mapfunc, date_sortfunc),
ReportOutputColumn("approvals", "Approvals", approvals_mapfunc, visible=False),
ReportOutputColumn("tests", "Tests", votes_mapfunc),
ReportOutputColumn("reviews", "Reviews", votes_mapfunc),
ReportOutputColumn("workflow", "Workflow", votes_mapfunc),
]
def __init__(self, client, usecolor=False):
super(ReportBaseChange, self).__init__(client, ReportBaseChange.COLUMNS,
sort="createdOn", reverse=False)
self.usecolor = usecolor
class ReportChanges(ReportBaseChange):
def __init__(self, client, projects=[], owners=[],
status=[], messages=[], branches=[], topics=[], reviewers=[],
approvals=[], files=[], rawquery=None, usecolor=False):
super(ReportChanges, self).__init__(client, usecolor)
self.projects = projects
self.owners = owners
self.status = status
self.messages = messages
self.branches = branches
self.topics = topics
self.reviewers = reviewers
self.approvals = approvals
self.files = files
self.rawquery = rawquery
def generate(self):
needFiles = False
if len(self.files) > 0:
needFiles = True
query = OperationQuery(self.client,
{
"project": self.projects,
"owner": self.owners,
"message": self.messages,
"branch": self.branches,
"topic": self.topics,
"status": self.status,
"reviewer": self.reviewers,
},
rawquery=self.rawquery,
patches=OperationQuery.PATCHES_CURRENT,
approvals=True,
files=needFiles)
def match_files(change):
if len(self.files) == 0:
return True
for filere in self.files:
for file in change.get_current_patch().files:
if re.search(filere, file.path):
return True
return False
table = self.new_table("Changes")
def querycb(change):
if match_files(change):
table.add_row(change)
query.run(querycb)
return table
class ReportToDoList(ReportBaseChange):
def __init__(self, client, projects=[], branches=[],
files=[], topics=[], reviewers=[], usecolor=False):
super(ReportToDoList, self).__init__(client, usecolor)
self.projects = projects
self.branches = branches
self.reviewers = reviewers
self.files = files
self.topics = topics
def filter(self, change):
return True
def generate(self):
needFiles = False
if len(self.files) > 0:
needFiles = True
query = OperationQuery(self.client,
{
"project": self.projects,
"status": [ OperationQuery.STATUS_OPEN ],
"branch": self.branches,
"topic": self.topics,
"reviewer": self.reviewers,
},
patches=OperationQuery.PATCHES_ALL,
approvals=True,
files=needFiles)
def match_files(change):
if len(self.files) == 0:
return True
for filere in self.files:
for patch in change.patches:
for file in patch.files:
if re.search(filere, file.path):
return True
return False
table = self.new_table("Changes To Do List")
def querycb(change):
if self.filter(change) and match_files(change):
table.add_row(change)
query.run(querycb)
return table
class ReportToDoListMine(ReportToDoList):
def __init__(self, client, username, projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes where 'username' has
reviewed an older version of the patch and needs
to provide feedback on the latest version
'''
super(ReportToDoListMine, self).__init__(client,
projects,
reviewers=[ username ],
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.username = username
def filter(self, change):
if (not change.has_current_reviewers([self.username]) and
not change.has_owner([self.username])):
return True
return False
class ReportToDoListOthers(ReportToDoList):
def __init__(self, client, username, bots=[], projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes that 'username' has
never reviewed, but which at least one other non-bot
user has reviewed
'''
super(ReportToDoListOthers, self).__init__(client,
projects,
reviewers=[ "!", username ],
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.bots = bots
def filter(self, change):
# allchanges contains changes where 'username' has
# not reviewed any version of the patch. We want to
# filter out changes which only have bots, or have
# no reviewers at all.
if change.has_any_other_reviewers(self.bots):
return True
return False
class ReportToDoListAnyones(ReportToDoList):
def __init__(self, client, username, bots=[], projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes where at least
one other non-bot user has provided review
'''
super(ReportToDoListAnyones, self).__init__(client,
projects,
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.bots = bots
self.username = username
def filter(self, change):
if change.has_current_reviewers([self.username]):
return False
if change.has_any_other_reviewers(self.bots):
return True
return False
class ReportToDoListNoones(ReportToDoList):
def __init__(self, client, bots=[], projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes that no one
has ever reviewed
'''
super(ReportToDoListNoones, self).__init__(client,
projects,
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.bots = bots
def filter(self, change):
if not change.has_any_other_reviewers(self.bots):
return True
return False
class ReportToDoListApprovable(ReportToDoList):
def __init__(self, client, username, strict, projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes that 'username' could
approve: they already carry a +2 review from someone else,
have no -2 review and no workflow votes, and 'username' is
neither the owner nor an existing reviewer
'''
super(ReportToDoListApprovable, self).__init__(client,
projects,
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.username = username
self.strict = strict
def filter(self, change):
if (change.has_current_approval(ModelApproval.ACTION_REVIEWED, 2) and
not change.has_owner([self.username]) and
not change.has_current_approval(ModelApproval.ACTION_WORKFLOW, -1) and
not change.has_current_approval(ModelApproval.ACTION_WORKFLOW, 1) and
not change.has_current_approval(ModelApproval.ACTION_REVIEWED, -2) and
not change.has_current_reviewers([self.username])):
if (self.strict and
change.has_current_approval(ModelApproval.ACTION_REVIEWED, -1)):
return False
return True
return False
class ReportToDoListExpirable(ReportToDoList):
def __init__(self, client, age=28, projects=[],
branches=[], files=[], topics=[], usecolor=False):
'''
Report to provide a list of changes that are
stale and can potentially be expired
'''
super(ReportToDoListExpirable, self).__init__(client,
projects,
branches=branches,
files=files,
topics=topics,
usecolor=usecolor)
self.age = age
def filter(self, change):
if change.get_current_reviewer_nack_age() > (self.age * 24 * 60 * 60):
return True
return False
class ReportOpenReviewStats(ReportBaseChange):
def __init__(self, client, projects, branch="master", topic="", days=7, usecolor=False):
super(ReportOpenReviewStats, self).__init__(client, usecolor)
self.projects = projects
self.branch = branch
self.topic = topic
self.days = days
@staticmethod
def average_age(changes, ages):
if len(changes) == 0:
return 0
total = 0
for change in changes:
total += ages[change]
return format_delta(total / len(changes))
@staticmethod
def median_age(changes, ages):
if len(changes) == 0:
return 0
total = 0
wantages = []
for change in changes:
wantages.append(ages[change])
wantages.sort()
return format_delta(wantages[int(len(wantages)/2)])
@staticmethod
def older_than(changes, ages, cutoffdays):
cutoff = cutoffdays * 24 * 60 * 60
older = 0
for change in changes:
if ages[change] > cutoff:
older = older + 1
return older
@staticmethod
def get_longest_changes(ids, changes, ages, count):
want = []
for id in sorted(ids, key=lambda x: ages[x]):
want.append(changes[id])
return want
def generate(self):
# We could query all projects at once, but if we do them
# individually it means we get better hit rate against the
# cache if the report is re-run for many different project
# combinations
agecurrent = {}
agefirst = {}
agenonnacked = {}
wait_reviewer = []
wait_submitter = []
changes = {}
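# For every open change we record three ages: since the current revision,
# since the first revision, and since the last revision that a reviewer did
# not nack; changes are split into those waiting on the submitter (current
# patch nacked) and those waiting on reviewers.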
for project in self.projects:
query = OperationQuery(self.client,
{
"project": [project],
"status": [OperationQuery.STATUS_OPEN],
"branch": [self.branch],
"topic": [self.topic],
},
patches=OperationQuery.PATCHES_ALL,
approvals=True)
def querycb(change):
if change.status != "NEW":
return
now = time.time()
current = change.get_current_patch()
first = change.get_first_patch()
nonnacked = change.get_reviewer_not_nacked_patch()
changes[change.id] = change
if current.is_nacked():
wait_submitter.append(change.id)
else:
wait_reviewer.append(change.id)
agecurrent[change.id] = current.get_age(now)
agefirst[change.id] = first.get_age(now)
if nonnacked:
agenonnacked[change.id] = nonnacked.get_age(now)
else:
agenonnacked[change.id] = 0
query.run(querycb)
compound = ReportOutputCompound()
summary = ReportOutputList([
ReportOutputColumn("nreviews", "Total open reviews", format="%d",
mapfunc=lambda rep, col, row: row[0] + row[1]),
ReportOutputColumn("waitsubmitter", "Waiting on submitter", format="%d",
mapfunc=lambda rep, col, row: row[0]),
ReportOutputColumn("waitreviewer", "Waiting on reviewer", format="%d",
mapfunc=lambda rep, col, row: row[1]),
], title="Review summary")
summary.set_row([len(wait_submitter), len(wait_reviewer)])
compound.add_report(summary)
lastrev = ReportOutputList([
ReportOutputColumn("average", "Average wait time",
mapfunc=lambda rep, col, row: row[0]),
ReportOutputColumn("median", "Median wait time",
mapfunc=lambda rep, col, row: row[1]),
ReportOutputColumn("stale", "Older than %d days" % self.days, format="%d",
mapfunc=lambda rep, col, row: row[2]),
], title="Summary since current revision")
lastrev.set_row([self.average_age(wait_reviewer, agecurrent),
self.median_age(wait_reviewer, agecurrent),
self.older_than(wait_reviewer, agecurrent, self.days)])
compound.add_report(lastrev)
firstrev = ReportOutputList([
ReportOutputColumn("average", "Average wait time",
mapfunc=lambda rep, col, row: row[0]),
ReportOutputColumn("median", "Median wait time",
mapfunc=lambda rep, col, row: row[1]),
], title="Summary since first revision")
firstrev.set_row([self.average_age(wait_reviewer, agefirst),
self.median_age(wait_reviewer, agefirst)])
compound.add_report(firstrev)
nonnackedrev = ReportOutputList([
ReportOutputColumn("average", "Average wait time",
mapfunc=lambda rep, col, row: row[0]),
ReportOutputColumn("median", "Median wait time",
mapfunc=lambda rep, col, row: row[1]),
], title="Summary since last revision without -1/-2 from reviewer")
nonnackedrev.set_row([self.average_age(wait_reviewer, agenonnacked),
self.median_age(wait_reviewer, agenonnacked)])
compound.add_report(nonnackedrev)
def waitlastmap(rep, col, row):
return format_delta(row.get_current_age())
def waitlastsort(rep, col, row):
return row.get_current_age()
waitlastrev = self.new_table("Longest waiting since current revision")
waitlastrev.add_column(ReportOutputColumn("age", "Age",
sortfunc=waitlastsort,
mapfunc=waitlastmap))
waitlastrev.sortcol = "age"
waitlastrev.reverse = True
for change in self.get_longest_changes(wait_reviewer, changes, agecurrent, 5):
waitlastrev.add_row(change)
compound.add_report(waitlastrev)
def waitfirstmap(rep, col, row):
return format_delta(row.get_first_age())
def waitfirstsort(rep, col, row):
return row.get_first_age()
waitfirstrev = self.new_table("Longest waiting since first revision")
waitfirstrev.add_column(ReportOutputColumn("age", "Age",
sortfunc=waitfirstsort,
mapfunc=waitfirstmap))
waitfirstrev.sortcol = "age"
waitfirstrev.reverse = True
for change in self.get_longest_changes(wait_reviewer, changes, agefirst, 5):
waitfirstrev.add_row(change)
compound.add_report(waitfirstrev)
def waitnonnackedmap(rep, col, row):
return format_delta(row.get_reviewer_not_nacked_age())
def waitnonnackedsort(rep, col, row):
return row.get_reviewer_not_nacked_age()
waitnonnackedrev = self.new_table("Longest waiting since last revision without -1/-2 from reviewer")
waitnonnackedrev.add_column(ReportOutputColumn("age", "Age",
sortfunc=waitnonnackedsort,
mapfunc=waitnonnackedmap))
waitnonnackedrev.sortcol = "age"
waitnonnackedrev.reverse = True
for change in self.get_longest_changes(wait_reviewer, changes, agenonnacked, 5):
waitnonnackedrev.add_row(change)
compound.add_report(waitnonnackedrev)
return compound
|
apache-2.0
|
GkAntonius/feynman
|
examples/Solid_State_Physics/plot_eph.py
|
2
|
1265
|
"""
Electron-phonon coupling self-energy
====================================
A diagram containing loopy lines.
"""
from feynman import Diagram
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,2))
ax = fig.add_axes([0,0,1,1], frameon=False)
ax.set_xlim(0, fig.get_size_inches()[0])
ax.set_ylim(0, fig.get_size_inches()[1])
# Init D and ax
D = Diagram(ax)
D.x0 = 0.2
D.y0 = sum(D.ax.get_ylim()) * .35
# Various sizes
opwidth = 1.
linlen = 2.
txtpad = .8
wiggle_amplitude=.1
# Line styles
Ph_style = dict(style='elliptic loopy', ellipse_spread=.6, xamp=.10, yamp=-.15, nloops=15)
DW_style = dict(style='circular loopy', circle_radius=.7, xamp=.10, yamp=.15, nloops=18)
G_style = dict(style='simple', arrow=True, arrow_param={'width':0.15, 'length': .3})
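# (Illustrative note) G_style draws the electron propagator, Ph_style the
# phonon line of the first diagram, and DW_style the phonon loop attached to
# a single vertex in the second diagram (presumably the Debye-Waller term).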
# Item 1
v11 = D.vertex([D.x0, D.y0])
v12 = D.vertex(v11.xy, dx=opwidth)
Sigma = D.operator([v11, v12])
Sigma.text("$\Sigma^{ep}$")
# Symbol
D.text(v12.x + txtpad, D.y0, "=")
# Item 2
v21 = D.vertex([v12.x + 2 * txtpad, D.y0 - 0.3])
v22 = D.vertex(v21.xy, dx=linlen)
G = D.line(v21, v22, **G_style)
Ph = D.line(v21, v22, **Ph_style)
# Symbol
D.text(v22.x + txtpad, D.y0, "+")
# Item 3
v31 = D.vertex([v22.x + 3 * txtpad, D.y0 - 0.3])
DW = D.line(v31, v31, **DW_style)
D.plot()
plt.show()
|
gpl-3.0
|
albertomurillo/ansible
|
test/units/modules/network/f5/test_bigip_provision.py
|
15
|
4548
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_provision import Parameters
from library.modules.bigip_provision import ModuleManager
from library.modules.bigip_provision import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_provision import Parameters
from ansible.modules.network.f5.bigip_provision import ModuleManager
from ansible.modules.network.f5.bigip_provision import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
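# load_fixture caches file contents by path and decodes JSON fixtures,
# falling back to the raw text when the file is not valid JSON.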
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
module='gtm',
)
p = Parameters(params=args)
assert p.module == 'gtm'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_provision_one_module_default_level(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
module='gtm',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
dict(
module='gtm',
level='none'
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.reboot_device = Mock(return_value=True)
mm.save_on_device = Mock(return_value=True)
# this forced sleeping can cause these tests to take 15
# or more seconds to run. This is deliberate.
mm._is_mprov_running_on_device = Mock(side_effect=[True, False, False, False, False])
results = mm.exec_module()
assert results['changed'] is True
assert results['level'] == 'nominal'
def test_provision_all_modules(self, *args):
modules = [
'afm', 'am', 'sam', 'asm', 'avr', 'fps',
'gtm', 'lc', 'ltm', 'pem', 'swg', 'ilx',
'apm',
]
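# Each supported module name is pushed through AnsibleModule construction;
# fail_json must never fire, i.e. every name is an accepted 'module' choice.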
for module in modules:
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
module=module,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
with patch('ansible.module_utils.basic.AnsibleModule.fail_json') as mo:
AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive
)
mo.assert_not_called()
|
gpl-3.0
|
BitFunnel/BitFunnel
|
googletest/googletest/xcode/Scripts/versiongenerate.py
|
3088
|
4536
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
mit
|
wakatime/vim-wakatime
|
packages/wakatime/packages/pygments/lexers/dsls.py
|
25
|
33336
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
include, default, this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Whitespace
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()<>]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
('[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
class ThriftLexer(RegexLexer):
"""
For `Thrift <https://thrift.apache.org/>`__ interface definitions.
.. versionadded:: 2.1
"""
name = 'Thrift'
aliases = ['thrift']
filenames = ['*.thrift']
mimetypes = ['application/x-thrift']
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'"', String.Double, combined('stringescape', 'dqs')),
(r'\'', String.Single, combined('stringescape', 'sqs')),
(r'(namespace)(\s+)',
bygroups(Keyword.Namespace, Text.Whitespace), 'namespace'),
(r'(enum|union|struct|service|exception)(\s+)',
bygroups(Keyword.Declaration, Text.Whitespace), 'class'),
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
include('keywords'),
include('numbers'),
(r'[&=]', Operator),
(r'[:;,{}()<>\[\]]', Punctuation),
(r'[a-zA-Z_](\.\w|\w)*', Name),
],
'whitespace': [
(r'\n', Text.Whitespace),
(r'\s+', Text.Whitespace),
],
'comments': [
(r'#.*$', Comment),
(r'//.*?\n', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
],
'stringescape': [
(r'\\([\\nrt"\'])', String.Escape),
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'[^\\"\n]+', String.Double),
],
'sqs': [
(r"'", String.Single, '#pop'),
(r'[^\\\'\n]+', String.Single),
],
'namespace': [
(r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
default('#pop'),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'keywords': [
(r'(async|oneway|extends|throws|required|optional)\b', Keyword),
(r'(true|false)\b', Keyword.Constant),
(r'(const|typedef)\b', Keyword.Declaration),
(words((
'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
'php_namespace', 'py_module', 'perl_package',
'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
'xsd_attrs', 'include'), suffix=r'\b'),
Keyword.Namespace),
(words((
'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
'string', 'binary', 'map', 'list', 'set', 'slist',
'senum'), suffix=r'\b'),
Keyword.Type),
(words((
'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
'__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
'break', 'case', 'catch', 'class', 'clone', 'continue',
'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
'ensure', 'except', 'exec', 'finally', 'float', 'for',
'foreach', 'function', 'global', 'goto', 'if', 'implements',
'import', 'in', 'inline', 'instanceof', 'interface', 'is',
'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
'or', 'pass', 'public', 'print', 'private', 'protected',
'raise', 'redo', 'rescue', 'retry', 'register', 'return',
'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
'then', 'this', 'throw', 'transient', 'try', 'undef',
'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
'volatile', 'when', 'while', 'with', 'xor', 'yield'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
],
'numbers': [
(r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
(r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
}
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identfier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
('[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require'
# Since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in RAISE (Rigorous Approach to Industrial Software Engineering)
method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
Check for the most common text in the beginning of a RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
'prefix', 'unique', 'object', 'foreach', 'include', 'template',
'function', 'variable', 'structure', 'extensible', 'declaration'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches',
'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'),
prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
class CrmshLexer(RegexLexer):
"""
Lexer for `crmsh <http://crmsh.github.io/>`_ configuration files
for Pacemaker clusters.
.. versionadded:: 2.1
"""
name = 'Crmsh'
aliases = ['crmsh', 'pcmk']
filenames = ['*.crmsh', '*.pcmk']
mimetypes = []
elem = words((
'node', 'primitive', 'group', 'clone', 'ms', 'location',
'colocation', 'order', 'fencing_topology', 'rsc_ticket',
'rsc_template', 'property', 'rsc_defaults',
'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
'tag'), suffix=r'(?![\w#$-])')
sub = words((
'params', 'meta', 'operations', 'op', 'rule',
'attributes', 'utilization'), suffix=r'(?![\w#$-])')
acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
val_qual = (r'(?:string|version|number)')
rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
r'start|promote|demote|stop)')
tokens = {
'root': [
(r'^#.*\n?', Comment),
# attr=value (nvpair)
(r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
bygroups(Name.Attribute, Punctuation, String)),
# need this construct, otherwise numeric node ids
# are matched as scores
# elem id:
(r'(node)(\s+)([\w#$-]+)(:)',
bygroups(Keyword, Whitespace, Name, Punctuation)),
# scores
(r'([+-]?([0-9]+|inf)):', Number),
# keywords (elements and other)
(elem, Keyword),
(sub, Keyword),
(acl, Keyword),
# binary operators
(r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
# other operators
(bin_rel, Operator.Word),
(un_ops, Operator.Word),
(date_exp, Operator.Word),
# builtin attributes (e.g. #uname)
(r'#[a-z]+(?![\w#$-])', Name.Builtin),
# acl_mod:blah
(r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
bygroups(Keyword, Punctuation, Name)),
# rsc_id[:(role|action)]
# NB: this matches all other identifiers
(r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
bygroups(Name, Punctuation, Operator.Word)),
# punctuation
(r'(\\(?=\n)|[[\](){}/:@])', Punctuation),
(r'\s+|\n', Whitespace),
],
}
class FlatlineLexer(RegexLexer):
"""
Lexer for `Flatline <https://github.com/bigmlcom/flatline>`_ expressions.
.. versionadded:: 2.2
"""
name = 'Flatline'
aliases = ['flatline']
filenames = []
mimetypes = ['text/x-flatline']
special_forms = ('let',)
builtins = (
"!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
"all-but", "all-with-defaults", "all-with-numeric-default", "and",
"asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
"category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
"count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
"epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
"epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
"epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
"first", "floor", "head", "if", "in", "integer", "language", "length",
"levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
"matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
"minimum", "missing", "missing-count", "missing?", "missing_count",
"mod", "mode", "normalize", "not", "nth", "occurrences", "or",
"percentile", "percentile-label", "population", "population-fraction",
"pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
"random-value", "re-quote", "real", "replace", "replace-first", "rest",
"round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
"sqrt", "square", "standard-deviation", "standard_deviation", "str",
"subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
"summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
"to-radians", "variance", "vectorize", "weighted-random-value", "window",
"winnow", "within-percentiles?", "z-score",
)
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
tokens = {
'root': [
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[a-f\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"\\(.|[a-z]+)", String.Char),
# expression template placeholder
(r'_', String.Symbol),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# parentheses
(r'(\(|\))', Punctuation),
],
}
class SnowballLexer(ExtendedRegexLexer):
"""
Lexer for `Snowball <http://snowballstem.org/>`_ source code.
.. versionadded:: 2.2
"""
name = 'Snowball'
aliases = ['snowball']
filenames = ['*.sbl']
_ws = r'\n\r\t '
def __init__(self, **options):
self._reset_stringescapes()
ExtendedRegexLexer.__init__(self, **options)
def _reset_stringescapes(self):
self._start = "'"
self._end = "'"
def _string(do_string_first):
def callback(lexer, match, ctx):
s = match.start()
text = match.group()
string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match
escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match
pos = 0
do_string = do_string_first
while pos < len(text):
if do_string:
match = string(text, pos)
yield s + match.start(1), String.Single, match.group(1)
if match.group(2) == "'":
yield s + match.start(2), String.Single, match.group(2)
ctx.stack.pop()
break
yield s + match.start(2), String.Escape, match.group(2)
pos = match.end()
match = escape(text, pos)
yield s + match.start(), String.Escape, match.group()
if match.group(2) != lexer._end:
ctx.stack[-1] = 'escape'
break
pos = match.end()
do_string = True
ctx.pos = s + match.end()
return callback
def _stringescapes(lexer, match, ctx):
lexer._start = match.group(3)
lexer._end = match.group(5)
return bygroups(Keyword.Reserved, Text, String.Escape, Text,
String.Escape)(lexer, match, ctx)
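# Snowball's 'stringescapes' directive redefines the pair of characters that
# open and close escape sequences inside string literals; _string() then
# alternates between the 'string' and 'escape' states using those delimiters.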
tokens = {
'root': [
(words(('len', 'lenof'), suffix=r'\b'), Operator.Word),
include('root1'),
],
'root1': [
(r'[%s]+' % _ws, Text),
(r'\d+', Number.Integer),
(r"'", String.Single, 'string'),
(r'[()]', Punctuation),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
(r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
(words(('as', 'get', 'hex', 'among', 'define', 'decimal',
'backwardmode'), suffix=r'\b'),
Keyword.Reserved),
(words(('strings', 'booleans', 'integers', 'routines', 'externals',
'groupings'), suffix=r'\b'),
Keyword.Reserved, 'declaration'),
(words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
'fail', 'goto', 'loop', 'next', 'test', 'true',
'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
'backwards', 'substring'), suffix=r'\b'),
Operator.Word),
(words(('size', 'limit', 'cursor', 'maxint', 'minint'),
suffix=r'\b'),
Name.Builtin),
(r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws),
bygroups(Keyword.Reserved, Text, String.Escape)),
(r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws),
_stringescapes),
(r'[A-Za-z]\w*', Name),
],
'declaration': [
(r'\)', Punctuation, '#pop'),
(words(('len', 'lenof'), suffix=r'\b'), Name,
('root1', 'declaration')),
include('root1'),
],
'string': [
(r"[^']*'", _string(True)),
],
'escape': [
(r"[^']*'", _string(False)),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
self._reset_stringescapes()
return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)
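# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how the lexer defined above could be driven directly. It assumes
# Pygments is installed so that the base classes import, and the Snowball
# fragment below is a made-up sample used only to inspect the token stream.
if __name__ == '__main__':
    _sample = "strings ( p1 p2 )\ndefine stem as ( do p1 )"
    for _token_type, _text in SnowballLexer().get_tokens(_sample):
        print(_token_type, repr(_text))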
|
bsd-3-clause
|
aaronzink/tensorflow-visual-inspection
|
models/object_detection/utils/np_box_list.py
|
18
|
4547
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
import numpy as np
from six import moves
class BoxList(object):
"""Box collection.
BoxList represents a list of bounding boxes as numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError('Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data.keys() if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
      field: a string parameter used to specify a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
      ValueError: if the field already exists or the dimensions of the field
          data do not match the number of boxes.
"""
if self.has_field(field):
      raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
      field: a string parameter used to specify a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
def _is_valid_boxes(self, data):
"""Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.
"""
if data.shape[0] > 0:
for i in moves.range(data.shape[0]):
if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]:
return False
return True
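# --- Usage sketch (added for illustration; not part of the original module) ---
# Demonstrates constructing a BoxList, attaching an extra field, and reading
# the corner coordinates back out. Box values here are arbitrary sample data.
if __name__ == '__main__':
    sample_boxes = np.array([[0.1, 0.1, 0.4, 0.4],
                             [0.2, 0.3, 0.7, 0.9]], dtype=np.float32)
    boxlist = BoxList(sample_boxes)
    boxlist.add_field('scores', np.array([0.9, 0.75], dtype=np.float32))
    print(boxlist.num_boxes())          # 2
    print(boxlist.get_extra_fields())   # ['scores']
    y_min, x_min, y_max, x_max = boxlist.get_coordinates()
    print(y_max - y_min)                # per-box heights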
|
apache-2.0
|
ebrensi/registry-frontend
|
ff.py
|
1
|
1240
|
#! usr/bin/env python
# This script is for testing without having to host the flask app.
import folium
import pandas as pd
import os
from sqlalchemy import create_engine
import geojson
DATABASE_URL = os.environ["DATABASE_URL"]
STATES_GEOJSON_PATH = "static/us-states.json"
engine = create_engine(DATABASE_URL)
with engine.connect() as db:
query = "Select state, count(*) From registry Group By state;"
df = pd.read_sql_query(query, db)
with open(STATES_GEOJSON_PATH, "r") as file:
gj = geojson.load(file)
# Folium choropleth requires a one-to-one correspondence between GeoJSON
# features (state definitions) and shade values, so we will make a new
# GeoJSON object that is a FeatureCollection of only the states that we
# have data for.
relevant_features = [feature for feature in gj["features"]
if ("id" in feature) and
(feature["id"] in df["state"].values)]
gj_relevant = geojson.FeatureCollection(relevant_features)
geo_str = geojson.dumps(gj_relevant)
base_map = folium.Map([43, -100], zoom_start=5)
base_map.choropleth(
geo_str=geo_str,
data=df,
columns=['state', 'count'],
key_on='feature.id',
fill_color='PuBuGn',
)
base_map.save("map.html")
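# --- Usage note (added for illustration; not part of the original script) ---
# Running this script requires DATABASE_URL to point at a database containing
# a `registry` table with a `state` column whose values match the `id` fields
# in static/us-states.json (presumably two-letter state codes). The rendered
# choropleth is written to map.html in the working directory.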
|
mit
|
yohn89/pythoner.net
|
pythoner/pm/urls.py
|
3
|
1035
|
#encoding:utf-8
"""
pythoner.net
Copyright (C) 2013 PYTHONER.ORG
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.conf.urls.defaults import *
urlpatterns = patterns('pm.views',
(r'^$','inbox'),
(r'^inbox/$','inbox'),
(r'^inbox/p(\d{1,10})/$','inbox'),
(r'^outbox/$','outbox'),
(r'^outbox/p(\d{1,10})/$','outbox'),
(r'^write/$','write'),
(r'delete/$','delete'),
(r'^(\d{1,10})/','detail'),
)
|
gpl-3.0
|
cattleio/stampede
|
docs/do-demo/deploy.py
|
1
|
6809
|
#!/usr/bin/env python
import cattle
import sys
ZK_NODES = 3
REDIS_NODES = 3
API_SERVER_NODES = 3
PROCESS_SERVER_NODES = 3
AGENT_SERVER_NODES = 3
MYSQL_COMPUTE = 1
# Set if you want to override the cattle.jar in the Docker image with a custom one
URL = ''
TAG = 'latest'
client = cattle.from_env()
def wait(c):
return client.wait_success(c, timeout=120)
deleted = []
for c in client.list_container(removed_null=True):
if c.name != 'Agent':
client.delete(c)
print 'Deleting', c.name
deleted.append(c)
print 'Waiting for deleting'
for c in deleted:
wait(c)
print 'Done'
def set_link(instance, name, target):
instance = wait(instance)
for link in instance.instanceLinks():
if link.linkName == name:
print 'Linking {} to {}'.format(instance.name, target.name)
wait(client.update(link, targetInstanceId=target.id))
def deploy_zk():
# Deploying ZK is complicated....
# Create dummy ZK to link against, then we will create the circle
# We want it to be stopped so that ZooKeeper doesn't actually connect
print 'Creating Dummy ZK node'
zk_dummy = wait(client.create_container(imageUuid='docker:ibuildthecloud/zookeeper',
name='zk_dummy'))
zk_dummy = wait(zk_dummy.stop())
zks = []
for i in range(1, ZK_NODES + 1):
links = {}
for j in range(1, ZK_NODES + 1):
if j != i:
links['zk{}'.format(j)] = zk_dummy.id
zk = client.create_container(imageUuid='docker:ibuildthecloud/zookeeper',
name='zk{}'.format(i),
environment={
'ID': i
},
instanceTriggeredStop='restart',
instanceLinks=links)
print 'Created', zk.name
zks.append(wait(zk))
for zk_target in zks:
for zk in zks:
set_link(zk, zk_target.name, zk_target)
client.delete(zk_dummy)
return zks
def deploy_redis():
print 'Create Redis'
redises = []
for i in range(1, REDIS_NODES + 1):
redis = client.create_container(imageUuid='docker:ibuildthecloud/redis',
instanceTriggeredStop='restart',
name='redis{}'.format(i))
print 'Created', redis.name
redises.append(redis)
return redises
def haproxy(targets, name, listen_port):
links = {}
for i, c in enumerate(targets):
links['TARGET{}'.format(i)] = wait(c).id
return client.create_container(imageUuid='docker:ibuildthecloud/haproxy',
instanceLinks=links,
instanceTriggeredStop='restart',
name=name,
ports=['{}:80'.format(listen_port)])
zookeepers = deploy_zk()
redises = deploy_redis()
mysql = client.create_container(imageUuid='docker:ibuildthecloud/mysql',
compute=MYSQL_COMPUTE,
instanceTriggeredStop='restart',
ports=['9082:80'],
name='mysql')
print 'Created', mysql.name
graphite = client.create_container(imageUuid='docker:ibuildthecloud/graphite',
instanceTriggeredStop='restart',
ports=['9083:80'],
name='graphite')
print 'Created', graphite.name
es = client.create_container(imageUuid='docker:ibuildthecloud/logstash',
instanceTriggeredStop='restart',
ports=['9200:9200'],
name='logstash/elasticache')
print 'Created', es.name
kibana = client.create_container(imageUuid='docker:ibuildthecloud/kibana',
name='Kibana',
instanceTriggeredStop='restart',
ports=['9081:80'],
environment={
'ES_PORT_9200_TCP_ADDR': wait(es).hosts()[0].ipAddresses()[0].address,
'ES_PORT_9200_TCP_PORT': '9200'
})
print 'Created', kibana.name
print 'Create Cattle'
links = {
'gelf': wait(es).id,
'graphite': wait(graphite).id
}
instances = []
instances.extend(zookeepers)
instances.extend(redises)
instances.append(mysql)
for c in instances:
links[c.name] = wait(c).id
api_servers = []
agent_servers = []
for i in range(1, API_SERVER_NODES + 1):
c = client.create_container(imageUuid='docker:cattle/api-server:{}'.format(TAG),
name='API Server {}'.format(i),
environment={
'URL': URL,
'CATTLE_CATTLE_SERVER_ID': 'apiserver{}'.format(i)
},
instanceTriggeredStop='restart',
instanceLinks=links)
print 'Created', c.name
api_servers.append(c)
for i in range(1, PROCESS_SERVER_NODES + 1):
c = client.create_container(imageUuid='docker:cattle/process-server:{}'.format(TAG),
name='Process Server {}'.format(i),
environment={
'URL': URL,
'CATTLE_JAVA_OPTS': '-Xmx1024m',
'CATTLE_CATTLE_SERVER_ID': 'processserver{}'.format(i)
},
instanceTriggeredStop='restart',
instanceLinks=links)
print 'Created', c.name
for i in range(1, AGENT_SERVER_NODES + 1):
c = client.create_container(imageUuid='docker:cattle/agent-server:{}'.format(TAG),
name='Agent Server {}'.format(i),
environment={
'URL': URL,
'CATTLE_JAVA_OPTS': '-Xmx1024m',
'CATTLE_CATTLE_SERVER_ID': 'agentserver{}'.format(i)
},
instanceTriggeredStop='restart',
instanceLinks=links)
print 'Created', c.name
agent_servers.append(c)
h1 = haproxy(api_servers, 'Api Servers Load Balancer', 8080)
print 'Created', h1.name
h2 = haproxy(agent_servers, 'Agent Servers Load Balancer', 8081)
print 'Created', h2.name
wait(h1)
wait(h2)
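# --- Usage note (added for illustration; not part of the original script) ---
# This is a Python 2 script driven entirely by the Cattle API client;
# cattle.from_env() typically reads CATTLE_URL, CATTLE_ACCESS_KEY and
# CATTLE_SECRET_KEY from the environment (an assumption here, not confirmed by
# the script itself). Scale the demo by editing ZK_NODES, REDIS_NODES,
# API_SERVER_NODES, PROCESS_SERVER_NODES and AGENT_SERVER_NODES at the top.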
|
apache-2.0
|
Aydarkhan/cca
|
automata.py
|
1
|
5250
|
"""Copyright 2010 Aydarkhanov Ruslan, Kurochkin Ilya, Rusinov Ivan
This file is part of CCA.
CCA is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published
by the Free Software Foundation, either version 2 of the License,
or (at your option) any later version.
CCA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CCA. If not, see http://www.gnu.org/licenses/.
"""
from state import *
class Automata(object):
def __init__(self, width=150, height=70, states=None):
self.width = width
self.height = height
if states == None:
self.states = [State("Dead", '-', "white", '0', [5]),
State("Alive", '+', "black", '1',
[0, 1, 4, 5, 6, 7, 8])]
else:
self.states = states
self.symbols = {}
self.st_sym = {}
for num, st in enumerate(self.states):
self.symbols[st.symbol] = num
self.st_sym[st.symbol] = st
self.field = []
for row in range(height):
self.field.append([])
for col in range(width):
self.field[row].append(self.states[0].symbol)
def next_step(self):
changed = []
for row in range(1, self.height - 1):
for col in range(1, self.width - 1):
symbol = self.field[row][col]
num = 0
for vert in range(row - 1, row + 2):
for horiz in range(col - 1, col + 2):
if self.field[vert][horiz] == symbol:
num += 1
if self.st_sym[symbol].next_state(num - 1):
changed.append((row, col))
for row in range(1, self.height - 1):
symbol1 = self.field[row][0]
symbol2 = self.field[row][self.width - 1]
num1 = 0
num2 = 0
for vert in range(row - 1, row + 2):
for horiz in [0, 1, self.width - 1]:
if self.field[vert][horiz] == symbol1:
num1 += 1
for horiz in [self.width - 2, self.width - 1, 0]:
if self.field[vert][horiz] == symbol2:
num2 += 1
if self.st_sym[symbol1].next_state(num1 - 1):
changed.append((row, 0))
if self.st_sym[symbol2].next_state(num2 - 1):
changed.append((row, self.width - 1))
for col in range(1, self.width - 1):
symbol1 = self.field[0][col]
symbol2 = self.field[self.height - 1][col]
num1 = 0
num2 = 0
for horiz in range(col - 1, col + 2):
for vert in [0, 1, self.height - 1]:
if self.field[vert][horiz] == symbol1:
num1 += 1
for vert in [self.height - 2, self.height - 1, 0]:
if self.field[vert][horiz] == symbol2:
num2 += 1
if self.st_sym[symbol1].next_state(num1 - 1):
changed.append((0, col))
if self.st_sym[symbol2].next_state(num2 - 1):
changed.append((self.height - 1, col))
for row, col in [(0, 0), (self.height - 1, self.width - 1),
(0, self.width - 1), (self.height - 1, 0)]:
symbol = self.field[row][col]
num = 0
for vert_long in range(row + self.height - 1,
row + self.height + 2):
for horiz_long in range(col + self.width - 1,
col + self.width + 2):
vert = vert_long % self.height
horiz = horiz_long % self.width
if self.field[vert][horiz] == symbol:
num += 1
if self.st_sym[symbol].next_state(num - 1):
changed.append((row, col))
for row, col in changed:
index = (self.symbols[self.field[row][col]] +
1) % len(self.states)
self.field[row][col] = self.states[index].symbol
return changed
def change_size(self, value, side):
"0-up, 1-right, 2-down, 3-left"
new_field = []
if side == 0:
self.height += value
for row in range(value):
new_field.append([])
for col in range(self.width):
new_field[row].append(self.states[0].symbol)
init = value
if value < 0:
init = 0
for row in range(init, self.height):
new_field.append([])
for col in range(self.width):
new_field[row].append(self.field[row - value][col])
if side == 2:
self.height += value
term = value
if value < 0:
term = 0
for row in range(self.height - term):
new_field.append([])
for col in range(self.width):
new_field[row].append(self.field[row][col])
for row in range(self.height - term, self.height):
new_field.append([])
for col in range(self.width):
new_field[row].append(self.states[0].symbol)
if side == 1:
self.width += value
term = value
if value < 0:
term = 0
for row in range(self.height):
new_field.append([])
for col in range(self.width - term):
new_field[row].append(self.field[row][col])
for row in range(self.height):
for col in range(self.width - term, self.width):
new_field[row].append(self.states[0].symbol)
if side == 3:
self.width += value
for row in range(self.height):
new_field.append([])
for col in range(value):
new_field[row].append(self.states[0].symbol)
init = value
if value < 0:
init = 0
for row in range(self.height):
for col in range(init, self.width):
new_field[row].append(self.field[row][col - value])
self.field = new_field
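# --- Usage sketch (added for illustration; not part of the original module) ---
# Builds a small automaton with the default two-state rule set, seeds a short
# row of live cells, and advances one generation. It assumes the companion
# `state` module providing the State class imported above is available.
if __name__ == '__main__':
    automaton = Automata(width=10, height=10)
    alive = automaton.states[1].symbol
    for r, c in [(4, 4), (4, 5), (4, 6)]:
        automaton.field[r][c] = alive
    changed = automaton.next_step()
    print("cells toggled on first step:", len(changed))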
|
gpl-2.0
|
jayceyxc/hue
|
desktop/core/ext-py/rsa-3.4.2/rsa/randnum.py
|
82
|
2643
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating random numbers."""
# Source inspired by code by Yesudeep Mangalapilly <[email protected]>
import os
from rsa import common, transform
from rsa._compat import byte
def read_random_bits(nbits):
"""Reads 'nbits' random bits.
If nbits isn't a whole number of bytes, an extra byte will be appended with
only the lower bits set.
"""
nbytes, rbits = divmod(nbits, 8)
# Get the random bytes
randomdata = os.urandom(nbytes)
# Add the remaining random bits
if rbits > 0:
randomvalue = ord(os.urandom(1))
randomvalue >>= (8 - rbits)
randomdata = byte(randomvalue) + randomdata
return randomdata
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits.
"""
randomdata = read_random_bits(nbits)
value = transform.bytes2int(randomdata)
# Ensure that the number is large enough to just fill out the required
# number of bits.
value |= 1 << (nbits - 1)
return value
def read_random_odd_int(nbits):
"""Reads a random odd integer of approximately nbits bits.
>>> read_random_odd_int(512) & 1
1
"""
value = read_random_int(nbits)
# Make sure it's odd
return value | 1
def randint(maxvalue):
"""Returns a random integer x with 1 <= x <= maxvalue
May take a very long time in specific situations. If maxvalue needs N bits
to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
is.
"""
bit_size = common.bit_size(maxvalue)
tries = 0
while True:
value = read_random_int(bit_size)
if value <= maxvalue:
break
if tries and tries % 10 == 0:
# After a lot of tries to get the right number of bits but still
# smaller than maxvalue, decrease the number of bits by 1. That'll
# dramatically increase the chances to get a large enough number.
bit_size -= 1
tries += 1
return value
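# --- Usage sketch (added for illustration; not part of the original module) ---
# Exercises the helpers above; note that read_random_int(nbits) always sets the
# top bit, so the result occupies exactly the requested number of bits.
if __name__ == '__main__':
    print(len(read_random_bits(12)))              # 2 bytes: 8 bits + 4 low bits
    print(common.bit_size(read_random_int(128)))  # 128
    print(read_random_odd_int(64) & 1)            # 1
    print(1 <= randint(1000) <= 1000)             # True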
|
apache-2.0
|
wreckJ/intellij-community
|
python/lib/Lib/site-packages/django/utils/_threading_local.py
|
343
|
6655
|
"""Thread-local objects
(Note that this module provides a Python version of the
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__():
threading_enumerate = enumerate
__getattribute__ = object.__getattribute__
def __del__(self):
key = __getattribute__(self, '_local__key')
try:
threads = list(threading_enumerate())
except:
# if enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
return __del__
__del__ = __del__()
try:
from threading import currentThread, enumerate, RLock
except ImportError:
from dummy_threading import currentThread, enumerate, RLock
|
apache-2.0
|